diff --git a/.github/workflows/fesom2_icepack.yml b/.github/inactive_workflows/fesom2_icepack.yml similarity index 93% rename from .github/workflows/fesom2_icepack.yml rename to .github/inactive_workflows/fesom2_icepack.yml index aeb50481f..9057057b1 100644 --- a/.github/workflows/fesom2_icepack.yml +++ b/.github/inactive_workflows/fesom2_icepack.yml @@ -1,18 +1,18 @@ name: FESOM2_icepack -# Controls when the action will run. Triggers the workflow on push or pull request. +# Controls when the action will run. Triggers the workflow on push or pull request. on: [push, pull_request] # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: - gfortran_ubuntu: + icepack_test: # Containers must run in Linux based operating systems runs-on: ubuntu-latest # Docker Hub image that `container-job` executes in - container: koldunovn/fesom2_test:fesom2.1 + container: koldunovn/fesom2_test:refactoring2 # Service containers to run with `gfortran_ubuntu` steps: diff --git a/.github/workflows/fesom2.1.yml b/.github/workflows/fesom2.1.yml index 4facc60cc..069a5a669 100644 --- a/.github/workflows/fesom2.1.yml +++ b/.github/workflows/fesom2.1.yml @@ -1,31 +1,40 @@ name: FESOM2 main test -# Controls when the action will run. Triggers the workflow on push or pull request. +# Controls when the action will run. Triggers the workflow on push or pull request. 
on: [push, pull_request] # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: - gfortran_ubuntu: + general_test: # Containers must run in Linux based operating systems runs-on: ubuntu-latest # Docker Hub image that `container-job` executes in - container: koldunovn/fesom2_test:fesom2.1 + container: koldunovn/fesom2_test:refactoring2 # Service containers to run with `gfortran_ubuntu` steps: # NK: this changes working directory to fesom2 - uses: actions/checkout@v2 - - name: Compile model + - name: Compile model (binary) run: | bash -l configure.sh ubuntu - + + - name: Compile model (library) + run: | + bash ./test/ifs_interface/configure_lib.sh -l + + - name: Library exists + run: | + bash ./test/ifs_interface/check_exist.sh + - name: Create global test run run: | mkrun pi test_pi -m docker + - name: FESOM2 global test run run: | cd work_pi @@ -35,15 +44,22 @@ jobs: run: | cd work_pi fcheck . - + + - name: Check restarts + run: | + cd work_pi + ./job_docker_new + - name: Create channel test run run: | mkrun souf test_souf -m docker + - name: FESOM2 channel test run run: | cd work_souf chmod +x job_docker_new ./job_docker_new + - name: Check channel results run: | cd work_souf @@ -53,6 +69,7 @@ jobs: run: | cd mesh_part bash -l configure.sh ubuntu + - name: Run partitioner run: | cd work_pi diff --git a/.github/workflows/fesom2_openmp.yml b/.github/workflows/fesom2_openmp.yml new file mode 100644 index 000000000..fd1e8cafa --- /dev/null +++ b/.github/workflows/fesom2_openmp.yml @@ -0,0 +1,49 @@ + +name: FESOM2 OpenMP test + +# Controls when the action will run. Triggers the workflow on push or pull request. 
+ +on: [push, pull_request] + + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + openmp_test: + # Containers must run in Linux based operating systems + runs-on: ubuntu-latest + # Docker Hub image that `container-job` executes in + container: koldunovn/fesom2_test:refactoring2 + + # Service containers to run with `gfortran_ubuntu` + steps: + # NK: this changes working directory to fesom2 + - uses: actions/checkout@v2 + + - name: switch OpenMP ON + run: | + cd ./src/ + sed -i 's/with OpenMP\" OFF/with OpenMP\" ON/g' CMakeLists.txt + cd ../ + + - name: Compile model + run: | + bash -l configure.sh ubuntu + + - name: Create global test run with 4 OpenMP threads + run: | + mkrun pi test_pi -m docker + cd work_pi + sed -i 's/THREADS=1/THREADS=4/g' job_docker_new + cd ../ + + - name: FESOM2 global test run + run: | + cd work_pi + chmod +x job_docker_new + ./job_docker_new + + + + + + diff --git a/.gitignore b/.gitignore index f68261b2e..7e9b58fcd 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ /build +/build.lib *.o *.mod *.x @@ -6,5 +7,8 @@ *~ *.swp src/icepack_drivers/Icepack +lib/*.a +lib/*.so /work_* Makefile.in +mesh_part/build diff --git a/CMakeLists.txt b/CMakeLists.txt index 95b7e7b78..5155b05d2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.4) +cmake_minimum_required(VERSION 3.9) # set default build type cache entry (do so before project(...) 
is called, which would create this cache entry on its own) if(NOT CMAKE_BUILD_TYPE) @@ -7,11 +7,13 @@ if(NOT CMAKE_BUILD_TYPE) endif() project(FESOM2.0) +option(BUILD_SHARED_LIBS "Build using shared libraries" OFF) # cmake-internal switch to toggle if library targets are being build as STATIC or SHARED, see https://cmake.org/cmake/help/latest/guide/tutorial/Selecting%20Static%20or%20Shared%20Libraries.html set(TOPLEVEL_DIR ${CMAKE_CURRENT_LIST_DIR}) set(FESOM_COUPLED OFF CACHE BOOL "compile fesom standalone or with oasis support (i.e. coupled)") set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. (Also needs FESOM_COUPLED to work)") set(CRAY OFF CACHE BOOL "compile with cray ftn") set(USE_ICEPACK OFF CACHE BOOL "compile fesom with the Iceapck modules for sea ice column physics.") +set(OPENMP_REPRODUCIBLE OFF CACHE BOOL "serialize OpenMP loops that are critical for reproducible results") #set(VERBOSE OFF CACHE BOOL "toggle debug output") #add_subdirectory(oasis3-mct/lib/psmile) add_subdirectory(src) diff --git a/README.md b/README.md index de34d9620..c9160c82b 100644 --- a/README.md +++ b/README.md @@ -27,3 +27,12 @@ References * **[Version coupled with ECHAM6 atmosphere]** Sidorenko, D., Goessling, H. F., Koldunov, N. V., Scholz, P., Danilov, S., Barbi, D., et al ( 2019). Evaluation of FESOM2.0 coupled to ECHAM6.3: Pre‐industrial and HighResMIP simulations. Journal of Advances in Modeling Earth Systems, 11. https://doi.org/10.1029/2019MS001696 * **[Version with ICEPACK sea ice thermodynamics]** Zampieri, Lorenzo, Frank Kauker, Jörg Fröhle, Hiroshi Sumata, Elizabeth C. Hunke, and Helge Goessling. Impact of Sea-Ice Model Complexity on the Performance of an Unstructured-Mesh Sea-ice/ocean Model Under Different Atmospheric Forcings. Washington: American Geophysical Union, 2020. https://dx.doi.org/10.1002/essoar.10505308.1. 
+ +Documentation for FESOM2 +************************ + +Here lives FESOM2 documentation + +Rendered version: https://fesom2.readthedocs.io/en/latest/ + + diff --git a/config/namelist.config b/config/namelist.config index b283fdd8d..01e14ed2e 100644 --- a/config/namelist.config +++ b/config/namelist.config @@ -23,8 +23,12 @@ ResultPath='../result_tmp/' / &restart_log -restart_length=1 !only required for d,h,s cases, y, m take 1 -restart_length_unit='y' !output period: y, d, h, s +restart_length=1 ! --> do netcdf restart ( only required for d,h,s cases, y, m take 1) +restart_length_unit='y' !output period: y, d, h, s, off +raw_restart_length=1 ! --> do core dump restart +raw_restart_length_unit='y' ! e.g. y, d, h, s, off +bin_restart_length=1 ! --> do derived type binary restart +bin_restart_length_unit='y' ! e.g. y, d, h, s, off logfile_outfreq=960 !in logfile info. output frequency, # steps / @@ -54,6 +58,7 @@ use_cavity=.false. ! use_cavity_partial_cell=.false. use_floatice = .false. use_sw_pene=.true. +flag_debug=.false. / &machine diff --git a/config/namelist.config.toy_soufflet b/config/namelist.config.toy_soufflet index b074865ba..6f63269d9 100644 --- a/config/namelist.config.toy_soufflet +++ b/config/namelist.config.toy_soufflet @@ -54,6 +54,7 @@ use_floatice = .false. use_sw_pene=.false. toy_ocean=.true. which_toy="soufflet" +flag_debug=.true. / &machine diff --git a/config/namelist.cvmix b/config/namelist.cvmix index 00754cca1..18a90c979 100644 --- a/config/namelist.cvmix +++ b/config/namelist.cvmix @@ -13,15 +13,16 @@ tke_min = 1.0e-6 ! tke_mxl_choice ... Can only be 1 or 2, choice of calculation of mixing ! length; currently only Blanke, B., P. Delecluse option is implemented tke_mxl_choice = 2 +tke_dolangmuir = .false. / -! namelist for IDEMIX +! namelist for IDEMIX von Pollman et al. (2017) ¶m_idemix -idemix_tau_v = 86400.0 ! time scale for vertical symmetrisation (sec) -idemix_tau_h = 1296000.0 ! 
time scale for horizontal symmetrisation +idemix_tau_v = 172800.0 ! 2days ! time scale for vertical symmetrisation (sec) +idemix_tau_h = 1296000.0 !15days ! time scale for horizontal symmetrisation idemix_gamma = 1.570 ! constant of order one derived from the shape of the spectrum in m space (dimensionless) -idemix_jstar = 10.0 ! spectral bandwidth in modes (dimensionless) -idemix_mu0 = 1.33333333 ! dissipation parameter (dimensionless) +idemix_jstar = 5.0 ! spectral bandwidth in modes (dimensionless) +idemix_mu0 = 0.33333333 ! dissipation parameter (dimensionless) idemix_sforcusage = 0.2 idemix_n_hor_iwe_prop_iter = 5 ! iterations for contribution from horiz. wave propagation idemix_surforc_file = '/work/ollie/clidyn/forcing/IDEMIX/fourier_smooth_2005_cfsr_inert_rgrid.nc' diff --git a/config/namelist.dyn b/config/namelist.dyn new file mode 100644 index 000000000..e35508f2f --- /dev/null +++ b/config/namelist.dyn @@ -0,0 +1,23 @@ +&dynamics_visc +visc_gamma0 = 0.003 ! [m/s], backgroung viscosity= gamma0*len, it should be as small a s possible (keep it < 0.01 m/s). +visc_gamma1 = 0.1 ! [nodim], for computation of the flow aware viscosity +visc_gamma2 = 0.285 ! [s/m], is only used in easy backscatter option +visc_easybsreturn= 1.5 + +opt_visc = 5 +! 5=Kinematic (easy) Backscatter +! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) +! 7=Biharmonic flow aware (viscosity depends on velocity differences) +! 8=Dynamic Backscatter + +use_ivertvisc= .true. +/ + +&dynamics_general +momadv_opt = 2 ! option for momentum advection in moment only =2 +use_freeslip = .false. ! Switch on free slip +use_wsplit = .false. ! Switch for implicite/explicte splitting of vert. velocity +wsplit_maxcfl= 1.0 ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) + ! 
in older FESOM it used to be w_exp_max=1.e-3 +/ + diff --git a/config/namelist.forcing b/config/namelist.forcing index 79eb1374f..c71eebf83 100644 --- a/config/namelist.forcing +++ b/config/namelist.forcing @@ -50,11 +50,14 @@ landice_end_mon=10 nm_nc_freq = 1 ! data points per day (i.e. 86400 if the time axis is in seconds) nm_nc_tmid = 0 ! 1 if the time stamps are given at the mid points of the netcdf file, 0 otherwise (i.e. 1 in CORE1, CORE2; 0 in JRA55) l_xwind=.true. l_ywind=.true. l_humi=.true. l_qsr=.true. l_qlw=.true. l_tair=.true. l_prec=.true. l_mslp=.false. l_cloud=.false. l_snow=.true. - nm_runoff_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/CORE2_runoff.nc' runoff_data_source ='CORE2' !Dai09, CORE2 + nm_runoff_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/CORE2_runoff.nc' !nm_runoff_file ='/work/ollie/qwang/FESOM2_input/mesh/CORE2_finaltopo_mean/forcing_data_on_grid/runoff_clim.nc' !runoff_data_source ='Dai09' !Dai09, CORE2, JRA55 !runoff_climatology =.true. - nm_sss_data_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/PHC2_salx.nc' sss_data_source ='CORE2' + nm_sss_data_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/PHC2_salx.nc' + chl_data_source ='None' !'Sweeney' monthly chlorophyll climatology or 'NONE' for constant chl_const (below). Make use_sw_pene=.TRUE. in namelist.config! + nm_chl_data_file ='/work/ollie/clidyn/forcing/Sweeney/Sweeney_2005.nc' + chl_const = 0.1 / diff --git a/config/namelist.forcing.ncep2 b/config/namelist.forcing.ncep2 new file mode 100644 index 000000000..7dcc0ef59 --- /dev/null +++ b/config/namelist.forcing.ncep2 @@ -0,0 +1,56 @@ +! This is the namelist file for forcing + +&forcing_exchange_coeff +Ce_atm_oce=0.00175 ! exchange coeff. of latent heat over open water +Ch_atm_oce=0.00175 ! exchange coeff. of sensible heat over open water +Cd_atm_oce=0.001 ! drag coefficient between atmosphere and water +Ce_atm_ice=0.00175 ! exchange coeff. of latent heat over ice +Ch_atm_ice=0.00175 ! exchange coeff. 
of sensible heat over ice +Cd_atm_ice=0.0012 ! drag coefficient between atmosphere and ice +Swind =0.0 ! parameterization for coupled current feedback +/ + +&forcing_bulk +AOMIP_drag_coeff=.false. +ncar_bulk_formulae=.true. +ncar_bulk_z_wind=10.0 ! height at which wind forcing is located (CORE, JRA-do: 10m, JRA, NCEP:2m) +ncar_bulk_z_tair=2.0 ! height at which temp forcing is located (CORE, JRA-do: 10m, JRA, NCEP:2m) +ncar_bulk_z_shum=2.0 ! height at which humi forcing is located (CORE, JRA-do: 10m, JRA, NCEP:2m) +/ + +&land_ice +use_landice_water=.false. +landice_start_mon=5 +landice_end_mon=10 +/ + +&nam_sbc + nm_xwind_file = '/work/ollie/clidyn/forcing/NCEP2/uwnd.10m.gauss.' ! name of file with winds, if nm_sbc=2 + nm_ywind_file = '/work/ollie/clidyn/forcing/NCEP2/vwnd.10m.gauss.' ! name of file with winds, if nm_sbc=2 + nm_humi_file = '/work/ollie/clidyn/forcing/NCEP2/shum.2m.gauss.' ! name of file with 2m specific humidity + nm_qsr_file = '/work/ollie/clidyn/forcing/NCEP2/dswrf.sfc.gauss.' ! name of file with solar heat + nm_qlw_file = '/work/ollie/clidyn/forcing/NCEP2/dlwrf.sfc.gauss.' ! name of file with Long wave + nm_tair_file = '/work/ollie/clidyn/forcing/NCEP2/air.2m.gauss.' ! name of file with 2m air temperature + nm_prec_file = '/work/ollie/clidyn/forcing/NCEP2/prate.sfc.gauss.' ! name of file with rain fall + nm_snow_file = '' ! name of file with snow fall + nm_mslp_file = '' ! air_pressure_at_sea_level + nm_xwind_var = 'uwnd' ! name of variable in file with wind + nm_ywind_var = 'vwnd' ! name of variable in file with wind + nm_humi_var = 'shum' ! name of variable in file with humidity + nm_qsr_var = 'dswrf' ! name of variable in file with solar heat + nm_qlw_var = 'dlwrf' ! name of variable in file with Long wave + nm_tair_var = 'air' ! name of variable in file with 2m air temperature + nm_prec_var = 'prate' ! name of variable in file with total precipitation + nm_snow_var = '' ! name of variable in file with total precipitation + nm_mslp_var = '' ! 
name of variable in file with air_pressure_at_sea_level + nm_nc_iyear = 1800 + nm_nc_imm = 1 ! initial month of time axis in netCDF + nm_nc_idd = 1 ! initial day of time axis in netCDF + nm_nc_freq = 24 ! data points per day (i.e. 86400 if the time axis is in seconds) + nm_nc_tmid = 0 ! 1 if the time stamps are given at the mid points of the netcdf file, 0 otherwise (i.e. 1 in CORE1, CORE2; 0 in JRA55) + l_xwind=.true., l_ywind=.true., l_humi=.true., l_qsr=.true., l_qlw=.true., l_tair=.true., l_prec=.true., l_mslp=.false., l_cloud=.false., l_snow=.false. + nm_runoff_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/CORE2_runoff.nc' + runoff_data_source ='CORE2' !Dai09, CORE2, JRA55 + nm_sss_data_file ='/work/ollie/clidyn/forcing/JRA55-do-v1.4.0/PHC2_salx.nc' + sss_data_source ='CORE2' +/ diff --git a/config/namelist.io b/config/namelist.io index 0a3270c4a..4ad3a8b96 100644 --- a/config/namelist.io +++ b/config/namelist.io @@ -9,8 +9,9 @@ ldiag_DVD =.false. ldiag_forc =.false. / -&nml_listsize -io_listsize=100 !number of streams to allocate. shallbe large or equal to the number of streams in &nml_list +&nml_general +io_listsize =100 !number of streams to allocate. shallbe large or equal to the number of streams in &nml_list +vec_autorotate =.false. / ! for sea ice related variables use_ice should be true, otherewise there will be no output @@ -22,8 +23,8 @@ io_listsize=100 !number of streams to allocate. 
shallbe large or equal to the nu io_list = 'sst ',1, 'm', 4, 'sss ',1, 'm', 4, 'ssh ',1, 'm', 4, - 'uice ',1, 'm', 4, - 'vice ',1, 'm', 4, + 'uice ',1, 'd', 4, + 'vice ',1, 'd', 4, 'a_ice ',1, 'm', 4, 'm_ice ',1, 'm', 4, 'm_snow ',1, 'm', 4, @@ -37,6 +38,8 @@ io_list = 'sst ',1, 'm', 4, 'Kv ',1, 'y', 4, 'u ',1, 'y', 4, 'v ',1, 'y', 4, + 'unod ',1, 'y', 4, + 'vnod ',1, 'y', 4, 'w ',1, 'y', 4, 'Av ',1, 'y', 4, 'bolus_u ',1, 'y', 4, diff --git a/config/namelist.oce b/config/namelist.oce index af71c7741..7af6867f7 100644 --- a/config/namelist.oce +++ b/config/namelist.oce @@ -2,27 +2,8 @@ &oce_dyn C_d=0.0025 ! Bottom drag, nondimensional -gamma0=0.003 ! [m/s], backgroung viscosity= gamma0*len, it should be as small as possible (keep it < 0.01 m/s). -gamma1=0.1 ! [nodim], for computation of the flow aware viscosity -gamma2=0.285 ! [s/m], is only used in easy backscatter option -Div_c=.5 ! the strength of the modified Leith viscosity, nondimensional, 0.3 -- 1.0 -Leith_c=.05 ! the strength of the Leith viscosity -visc_option=5 ! 1=Harmonic Leith parameterization; - ! 2=Laplacian+Leith+biharmonic background - ! 3=Biharmonic Leith parameterization - ! 4=Biharmonic flow aware - ! 5=Kinematic (easy) Backscatter - ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) - ! 7=Biharmonic flow aware (viscosity depends on velocity differences) - ! 8=Dynamic Backscatter -easy_bs_return= 1.5 ! coefficient for returned sub-gridscale energy, to be used with visc_option=5 (easy backscatter) A_ver= 1.e-4 ! Vertical viscosity, m^2/s scale_area=5.8e9 ! Visc. and diffus. are for an element with scale_area -mom_adv=2 ! 1=vector CV, p1 vel, 2=sca. CV, 3=vector inv. -free_slip=.false. ! Switch on free slip -i_vert_visc=.true. -w_split=.false. -w_max_cfl=1.0 ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) ! in older FESOM it used to be w_exp_max=1.e-3 SPP=.false. ! Salt Plume Parameterization Fer_GM=.true. ! to swith on/off GM after Ferrari et al. 
2010 K_GM_max = 2000.0 ! max. GM thickness diffusivity (m2/s) @@ -43,51 +24,3 @@ mix_scheme='KPP' ! vertical mixing scheme: KPP, PP Ricr = 0.3 ! critical bulk Richardson Number concv = 1.6 ! constant for pure convection (eqn. 23) (Large 1.5-1.6; MOM default 1.8) / - -&oce_tra -use_momix = .true. ! switch on/off !Monin-Obukhov -> TB04 mixing -momix_lat = -50.0 ! latitidinal treshhold for TB04, =90 --> global -momix_kv = 0.01 ! PP/KPP, mixing coefficient within MO length -use_instabmix = .true. ! enhance convection in case of instable stratification -instabmix_kv = 0.1 -use_windmix = .false. ! enhance mixing trough wind only for PP mixing (for stability) -windmix_kv = 1.e-3 -windmix_nl = 2 - -smooth_bh_tra =.false. ! use biharmonic diffusion (filter implementation) for tracers -gamma0_tra = 0.0005 ! gammaX_tra are analogous to those in the dynamical part -gamma1_tra = 0.0125 -gamma2_tra = 0. - -diff_sh_limit=5.0e-3 ! for KPP, max diff due to shear instability -Kv0_const=.true. -double_diffusion=.false. ! for KPP,dd switch -K_ver=1.0e-5 -K_hor=3000. -surf_relax_T=0.0 -surf_relax_S=1.929e-06 ! 50m/300days 6.43e-07! m/s 10./(180.*86400.) -balance_salt_water =.true. ! balance virtual-salt or freshwater flux or not -clim_relax=0.0 ! 1/s, geometrical information has to be supplied -ref_sss_local=.true. -ref_sss=34. -i_vert_diff =.true. ! true -tra_adv_hor ='MFCT' !'MUSCL', 'UPW1' -tra_adv_ver ='QR4C' !'QR4C', 'CDIFF', 'UPW1' -tra_adv_lim ='FCT' !'FCT', 'NONE' (default) -tra_adv_ph = 1. ! a parameter to be used in horizontal advection (for MUSCL it is the fraction of fourth-order contribution in the solution) -tra_adv_pv = 1. ! a parameter to be used in horizontal advection (for QR4C it is the fraction of fourth-order contribution in the solution) -! Implemented trassers (3d restoring): -! 301 - Fram strait. -! 302 - Bering Strait -! 
303 - BSO -num_tracers=2 !number of all tracers -tracer_ID =0,1 !their IDs (0 and 1 are reserved for temperature and salinity) -/ - -&oce_init3d ! initial conditions for tracers -n_ic3d = 2 ! number of tracers to initialize -idlist = 1, 0 ! their IDs (0 is temperature, 1 is salinity, etc.). The reading order is defined here! -filelist = 'phc3.0_winter.nc', 'phc3.0_winter.nc' ! list of files in ClimateDataPath to read (one file per tracer), same order as idlist -varlist = 'salt', 'temp' ! variables to read from specified files -t_insitu = .true. ! if T is insitu it will be converted to potential after reading it -/ diff --git a/config/namelist.tra b/config/namelist.tra new file mode 100644 index 000000000..9108013b7 --- /dev/null +++ b/config/namelist.tra @@ -0,0 +1,49 @@ +&tracer_listsize +num_tracers=100 !number of tracers to allocate. shallbe large or equal to the number of streams in &nml_list +/ + +&tracer_list +nml_tracer_list = +1 , 'MFCT', 'QR4C', 'FCT ', 1., 1., +2 , 'MFCT', 'QR4C', 'FCT ', 1., 1., +!101, 'UPW1', 'UPW1', 'NON ', 0., 0. +/ + +&tracer_init3d ! initial conditions for tracers +n_ic3d = 2 ! number of tracers to initialize +idlist = 2, 1 ! their IDs (0 is temperature, 1 is salinity, etc.). The reading order is defined here! +filelist = 'phc3.0_winter.nc', 'phc3.0_winter.nc' ! list of files in ClimateDataPath to read (one file per tracer), same order as idlist +varlist = 'salt', 'temp' ! variables to read from specified files +t_insitu = .true. ! if T is insitu it will be converted to potential after reading it +/ + +&tracer_general +! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). +smooth_bh_tra =.false. ! use biharmonic diffusion (filter implementation) for tracers +gamma0_tra = 0.0005 ! gammaX_tra are analogous to those in the dynamical part +gamma1_tra = 0.0125 +gamma2_tra = 0. +i_vert_diff =.true. +/ + +&tracer_phys +use_momix = .true. ! 
switch on/off !Monin-Obukhov -> TB04 mixing +momix_lat = -50.0 ! latitidinal treshhold for TB04, =90 --> global +momix_kv = 0.01 ! PP/KPP, mixing coefficient within MO length +use_instabmix = .true. ! enhance convection in case of instable stratification +instabmix_kv = 0.1 +use_windmix = .false. ! enhance mixing trough wind only for PP mixing (for stability) +windmix_kv = 1.e-3 +windmix_nl = 2 +diff_sh_limit=5.0e-3 ! for KPP, max diff due to shear instability +Kv0_const=.true. +double_diffusion=.false. ! for KPP,dd switch +K_ver=1.0e-5 +K_hor=3000. +surf_relax_T=0.0 +surf_relax_S=1.929e-06 ! 50m/300days 6.43e-07! m/s 10./(180.*86400.) +balance_salt_water =.true. ! balance virtual-salt or freshwater flux or not +clim_relax=0.0 ! 1/s, geometrical information has to be supplied +ref_sss_local=.true. +ref_sss=34. +/ diff --git a/configure_any.sh b/configure_any.sh new file mode 100755 index 000000000..7c63dae66 --- /dev/null +++ b/configure_any.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# custom build script in use at ECMWF + +set -e + +LIB=no +while getopts "l" OPT +do + case "$OPT" in + l) LIB=yes;; + esac +done +shift $((OPTIND-1)) + +source env.sh # source this from your run script too + +if [[ ${LIB} = yes ]]; then + mkdir build.lib || true # build dir for library + cd build.lib + cmake -DBUILD_FESOM_AS_LIBRARY=ON .. # not required when re-compiling + sed -i -e 's/-lFALSE//g' src/CMakeFiles/fesom.dir/link.txt # workaround for the moment on cray +else + mkdir build || true # build dir for binary + cd build + cmake .. # not required when re-compiling +fi +make install -j`nproc --all` diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 000000000..d4bb2cbb9 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . 
+BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_static/css/custom.css b/docs/_static/css/custom.css new file mode 100644 index 000000000..11b47c4b7 --- /dev/null +++ b/docs/_static/css/custom.css @@ -0,0 +1,20 @@ +/* Copied from MITgcm version: */ +/* https://github.com/jahn/altMITgcm/blob/master/doc/_static/css/custom.css */ +/* Make equation numbers float to the right */ +.eqno { + margin-left: 5px; + float: right; +} +/* Hide the link... */ +.math .headerlink { + display: none; + visibility: hidden; +} +/* ...unless the equation is hovered */ +.math:hover .headerlink { + display: inline-block; + visibility: visible; + /* Place link in margin and keep equation number aligned with boundary */ + margin-right: -0.7em; +} + diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000..eb459b927 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,116 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'fesom2' +copyright = '2021, FESOM2 team' +author = u'Sergey Danilov, Dmitry Sidorenko, Nikolay Koldunov, Patrick Scholz, Qiang Wang, Thomas Rackow, Helge Goessling and Lorenzo Zampieri' + +# The full version, including alpha/beta/rc tags +release = '0.2' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['sphinx.ext.mathjax', 'sphinxcontrib.bibtex'] +bibtex_bibfiles = ['mybib_fesom2.bib'] + +numfig = True +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The master toctree document. +master_doc = 'index' + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +html_logo = 'fesom_logo.png' + +project = u'FESOM2' +copyright = u'2014-, FESOM2 contributors' + + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. 
+htmlhelp_basename = 'FESOM2doc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + 'papersize': 'a4paper', + + # The font size ('10pt', '11pt' or '12pt'). + # + 'pointsize': '12pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + 'preamble': r''' + \setcounter{secnumdepth}{3} + ''', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'FESOM2.tex', u'FESOM2 Documentation', + u'Sergey Danilov, Dmitry Sidorenko, \\and Nikolay Koldunov, Patrick Scholz, \\and Qiang Wang, Thomas Rackow, \\and Helge Goessling, Lorenzo Zampieri', 'manual'), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'fesom2', u'FESOM2 Documentation', + [author], 1) +] + + +def setup(app): + app.add_css_file('css/custom.css') diff --git a/docs/data_processing/data_processing.rst b/docs/data_processing/data_processing.rst new file mode 100644 index 000000000..fc908b6ae --- /dev/null +++ b/docs/data_processing/data_processing.rst @@ -0,0 +1,136 @@ +.. 
_chap_data_processing: + +Data pre/post processing +************************ + +netCDF files for initial conditions +=================================== + +The netCDF files have to satisfy the following criteria: + +- should have DIMENSIONS named ``lon/longitude/LON`` , ``lat/latitude/LAT`` and ``depth`` +- should have VARIABLES named ``lon/longitude/LON`` , lat/latitude/LAT and ``depth`` +- ``lon/lat`` dimentions should be one dimentional (e.g ``lon(lon)``) +- each variable with initial conditions should have only three dimentions (e.g. ``temp(depth, lat, lon)``) +- The fields should start from ``0th`` meridian and longitudes should have values from ``0 to 360`` +- the missing values should have values larger than ``1e11`` + +The file that would be read potentially without problems can be created with the following python code (variables lat,lon_reshaped, depth, salt_reshaped, temp_reshaped should be prepeared from the original file): + +.. code-block:: python + + from netCDF4 import Dataset + + fw = Dataset('woa18_netcdf.nc', 'w', ) + + fw.createDimension('time', 1) + fw.createDimension('lat', lat.shape[0]) + fw.createDimension('lon', lon_reshaped.shape[0]) + fw.createDimension('depth', depth.shape[0]) + + latitude = fw.createVariable('lat', 'd', ('lat',)) + latitude[:] = lat[:] + + longitude = fw.createVariable('lon', 'd', ('lon',)) + longitude[:] = lon_reshaped[:] + + ddepth = fw.createVariable('depth', 'd', ('depth',)) + ddepth[:] = depth[:] + + salinity = fw.createVariable('salt','d', ('depth', 'lat', 'lon'), fill_value= 1e+20) + salinity[:] = salt_reshaped[:] + salinity.missing_value = 1e+20 + + temperature = fw.createVariable('temp','d', ('depth', 'lat', 'lon'), fill_value= 1e+20) + temperature[:] = temp_reshaped[:] + temperature.missing_value = 1e+20 + +We will try to provide convertion instructions in the form of jupyter notebooks to all files with initial conditions. 
+ + +Convert grid to netCDF that CDO understands +=========================================== + +We are going to use ``spheRlab`` for conversion. You have to have **R** already installed. + +Clone ``spheRlab``: + +:: + + git clone https://github.com/FESOM/spheRlab.git spheRlab + +Build package: + +:: + + cd spheRlab/ + R CMD build spheRlab + +Make sure you have cdo installed (``cdo -V``) and launch R (type ``R``). + +Install the package: + +:: + + R>install.packages("spheRlab_1.1.0.tar.gz",repos=NULL) + +If you don't have netCDF library installed, you also have to do: + +:: + + R>install.packages("ncdf4") + +Load libraries: + +:: + + R>library(spheRlab) + R>library(ncdf4) + +You can get help (for any function) by typing, e.g.: + +:: + + R>?sl.grid.writeCDO + +Define path to the mesh: + +:: + + R>meshpath="/work/ollie/dsidoren/input/fesom2.0/meshes/mesh_CORE2_final/" + +Read the grid in to R structure (the arguments rot etc. might be different for different meshes, but this is the standard): + +For rotated meshes: + +:: + + R>grid = sl.grid.readFESOM(griddir=meshpath,rot=TRUE,rot.invert=TRUE,rot.abg=c(50,15,-90)) + +For unrotated meshes: + +:: + + R>grid = sl.grid.readFESOM(griddir=meshpath,rot=FALSE,rot.invert=FALSE,rot.abg=c(0,0,0), threeD=FALSE) + +Define path to the output file: + +:: + + R>ofile = paste0(meshpath, "sl.grid.CDO", sep = "") + +Directrly write netCDF file with mesh description: + +:: + + R>sl.grid.writeCDO(grid, ofile=ofile, netcdf=TRUE, depth=FALSE) + +Conservative remapping with cdo (interpolate topography to mesh) +---------------------------------------------------------------- +:: + + $bash> export MESHPATH=/work/ollie/dsidoren/input/fesom2.0/meshes/mesh_CORE2_final/ + $bash> export DATAPATH=/work/ollie/dsidoren/ETOPO5/etopo5_lonlat.nc + $bash> cdo remapycon,$MESHPATH/sl.grid.CDO.nc -selname,topo $DATAPATH $MESHPATH/topo.nc + + diff --git a/docs/fesom_logo.png b/docs/fesom_logo.png new file mode 100644 index 000000000..24acc742c Binary files 
/dev/null and b/docs/fesom_logo.png differ diff --git a/docs/forcing_configuration.rst b/docs/forcing_configuration.rst new file mode 100644 index 000000000..b4a34ff21 --- /dev/null +++ b/docs/forcing_configuration.rst @@ -0,0 +1,81 @@ +.. _chap_forcing_configuration: + +Forcing configuration (namelist.forcing) +**************************************** + +Sections of the namelist +======================== + +Section &forcing_exchange_coeff +""""""""""""""""""""""""""""""" + +- **Ce_atm_oce=1.75e-3** Exchange coeff. of latent heat over open water. +- **Ch_atm_oce=1.75e-3** Exchange coeff. of sensible heat over open water. +- **Cd_atm_oce=1.0e-3** Drag coefficient between atmosphere and water. +- **Ce_atm_ice=1.75e-3** Exchange coeff. of latent heat over ice. +- **Ch_atm_ice=1.75e-3** Exchange coeff. of sensible heat over ice. +- **Cd_atm_ice=1.2e-3** Drag coefficient between atmosphere and ice. + +Section &forcing_bulk +""""""""""""""""""""" + +- **AOMIP_drag_coeff=.false.** +- **ncar_bulk_formulae=.true.** + + +Section &land_ice +""""""""""""""""" + +**use_landice_water=.false.** +**landice_start_mon=5** +**landice_end_mon=10** + +Section &nam_sbc +"""""""""""""""" + +Forcing file names should be in the form of ``variable.year.nc```. In the namelist you provide a full path to the file and ``variable.`` name in the form of:: + + nm_xwind_file = '/path/to/forcing/CORE2/u_10.' + +- **nm_xwind_file=''** Name of the file with winds. +- **nm_ywind_file=''** Name of the file with winds. +- **nm_humi_file=''** Name of the file with humidity. +- **nm_qsr_file=''** Name of the file with solar heat. +- **nm_qlw_file=''** Name of the file with Long wave. +- **nm_tair_file=''** Name of the file with 2m air temperature. +- **nm_prec_file=''** Name of the file with total precipitation. +- **nm_snow_file=''** Name of the file with snow precipitation. +- **nm_mslp_file=''** Name of the file with air pressure at sea level. 
+- **nm_xwind_var=''** Name of the variable in file with wind. +- **nm_ywind_var=''** Name of the variable in file with wind. +- **nm_humi_var=''** Name of the variable in file with humidity. +- **nm_qsr_var=''** Name of the variable in file with solar heat. +- **nm_qlw_var=''** Name of the variable in file with Long wave. +- **nm_tair_var=''** Name of the variable in file with 2m air temperature. +- **nm_prec_var=''** Name of the variable in file with total precipitation. +- **nm_snow_var=''** Name of the variable in file with total precipitation. +- **nm_mslp_var=''** Name of the variable in file with air pressure at sea level. +- **nm_nc_iyear=1948** First year of the forcing. +- **nm_nc_imm=1** Initial month of time axis in netCDF. +- **nm_nc_idd=1** Initial day of time axis in netCDF. +- **nm_nc_freq=1** Data points per day (i.e. 86400 if the time axis is in seconds) +- **nm_nc_tmid=1** It's 1 if the time stamps are given at the mid points of the netcdf file, 0 otherwise (i.e. 1 in CORE1, CORE2; 0 in JRA55). + +The following options control if the forcing files for particular variables are used or not. + +- **l_xwind=.true.** +- **l_ywind=.true.** +- **l_humi=.true.** +- **l_qsr=.true.** +- **l_qlw=.true.** +- **l_tair=.true.** +- **l_prec=.true.** +- **l_mslp=.false.** +- **l_cloud=.false.** +- **l_snow=.true.** + + +- **nm_runoff_file=''** Name of the runoff file. +- **runoff_data_source ='CORE2'** Other options are ``Dai09``, ``CORE2`` +- **nm_sss_data_file=''** Name of the sea surface salinity restoring data file. +- **sss_data_source='CORE2'** \ No newline at end of file diff --git a/docs/general_configuration/general_configuration.rst b/docs/general_configuration/general_configuration.rst new file mode 100644 index 000000000..7232885d1 --- /dev/null +++ b/docs/general_configuration/general_configuration.rst @@ -0,0 +1,89 @@ +.. 
_chap_general_configuration: + +General configuration (namelist.config) +*************************************** + +General configuration is defined in the ``namelist.config``. Here you define time stepping and restart frequency, details of the ALE and mesh geometry. + +Sections of the namelist +======================== + +Section &modelname +"""""""""""""""""" + +- **runid='fesom'** define name of the run. It will be used as part of the file name in model output and restarts. Don't change it if you don't have a good reason, since many post processing scripts assume it to be ``fesom``. + +Section &timestep +""""""""""""""""" + +- **step_per_day=32** define how many steps per day the model will have. The variable ``step_per_day`` must divide 86400 evenly ``(mod(86400,step_per_day)==0)``. Valid values are, for example: 32(45min), 36(40min), 48(30min), 60(24min), 72(20min), 144(10min), 288(5min), 1440(1min). +- **run_length= 62** length of the run in ``run_length_unit``. +- **run_length_unit='y'** units of the ``run_length``. Valid values are year (``y``), month (``m``), day (``d``), and model time step (``s``). + +Section &clockinit +"""""""""""""""""" + +- **timenew=0.0**, **daynew=1**, **yearnew=1948** gives the seconds, day and year of the initialisation time point, respectively. If the initialisation time is identical with the first line of the clock file runid.clock the model performs a cold start. If the initialisation time and the first line of the clock file are not identical the model assumes that a restart file must exist and tries to do a warm start. + + +Section &paths +"""""""""""""" + +- **Meshpath=''**, path to your mesh directory +- **ClimateDataPath=''**, path to the location of your 3D initialisation data for temperature and salinity. 
+- **Resultpath=''**, directory where your results should be stored + + +Section &restart_log +"""""""""""""""""""" + +- **restart_length=1**, how often should restart file be written in units of ``restart_length_unit`` +- **restart_length_unit='y'** units of the ``restart_length``. Valid values are year (``y``), month (``m``), day (``d``), and model time step (``s``). +- **logfile_outfreq=960** the frequency (in timesteps), the model state information should be written into the job monitor .log/.out file. + + +Section &ale_def +"""""""""""""""" + +- **which_ALE='linfs'**, which Arbitrary Lagrangian Eulerian (ALE) approach should be used? Options are 1) ``linfs`` - vertical grid is fixed in time, 2) ``zlevel`` - only the surface layer is allowed to move with the change in ssh all other levels are fixed in time 3) ``zstar`` - all layers, except the bottom layer are allowed to move, the change in ssh is equally distributed over all layers. It is recommended to use either ``linfs`` or ``zstar``. +- **use_partial_cell=.false.**, switch if partial bottom cells should be used or not. Partial cell means that the bottom layer thickness can be different from the full depth levels to be closer to the real bottom topography +- **min_hnode=0.5**, for ``zlevel``: layer thickness should not become smaller than min_hnode [in fraction from 0 to 1] of original layer thickness. If it happens switch from ``zlevel`` to local ``zstar``. 
+- **lzstar_lev=4**, for ``zlevel`` in case the min_hnode criterion is reached, over how many levels the ssh change should be distributed for local zstar +- **max_ice_loading=5.0**, for ``use_floatice=.True.`` in case of floating sea ice how much ice loading is allowed [unit m] the excess is discarded + +Section &geometry +""""""""""""""""" + +- **cartesian =.false.**, use flat cartesian coordinates (idealized geometry) +- **fplane =.false.**, use fplane approximation, coriolis force is lat independent coriolis=2*omega*0.71 +- **rotated_grid =.true.**, should the model perform on rotated grid. +- **force_rotation=.false.**, if the input mesh is unrotated and must be rotated in FESOM2.0 then ``.true.``, if the input mesh is already rotated ``.false.`` + +- **alphaEuler =50.0**, rotated Euler angles, alpha [degree] +- **betaEuler =15.0**, rotated Euler angles, beta [degree] +- **gammaEuler =-90.0**, rotated Euler angles, gamma [degree] + + +Section &calendar +""""""""""""""""" + +- **include_fleapyear=.false.**, should be ``.true.`` when the forcing contains leap years (i.e. NCEP...) + + +Section &run_config +""""""""""""""""""" + +- **use_ice =.true.**, simulate ocean + sea ice +- **use_floatice=.false.**, allow floating sea ice only possible with ``zlevel`` or ``zstar`` +- **use_sw_pene =.true.**, use parameterisation for short wave penetration. Incoming short wave radiation is distributed over several layers + + +Section &machine +"""""""""""""""" + +- **n_levels = 2**, number of hierarchy levels for mesh partitioning +- **n_part = 12, 36**, number of partitions on each hierarchy level, the last number should optimally correspond with the number of cores per computational node + + + + diff --git a/docs/geometry.rst b/docs/geometry.rst new file mode 100644 index 000000000..d3d823b12 --- /dev/null +++ b/docs/geometry.rst @@ -0,0 +1,72 @@ +.. 
_geometry: + +Geometry +******** + +The placement of variables +========================== + +FESOM2 uses a cell-vertex placement of variables in the horizontal directions. The 3D mesh structure is defined by the surface triangular mesh and a system of (moving) level surfaces which split a triangular column in a set of smaller triangular prisms bounded by levels. In a horizontal plane, the horizontal velocities are located at cell (triangle) centroids, and scalar variables are at mesh (triangle) vertices. The vector control volumes are the prisms based on mesh surface cells, and the prisms based on median-dual control volumes are used for scalars (temperature, salinity, pressure and elevation). The latter are obtained by connecting cell centroids with edge midpoints, as illustrated in :numref:`labelgeometry`. The same cell-vertex placement of variables is also used in FVCOM :cite:`FVCOM`, however FESOM2 differs in almost every numerical aspect, including the implementation of time stepping, scalar and momentum advection and dissipation (see below). + +.. _labelgeometry: +.. figure:: img/fig_geometry.png + + Schematic of cell-vertex discretization (left) and the edge-based structure (right). The horizontal velocities are located at cell (triangle) centers (red circles) and scalar quantities (the elevation, pressure, temperature and salinity) are at vertices (blue circles). The vertical velocity and the curl of horizontal velocity (the relative vorticity) are computed at the scalar locations too. Scalar control volumes (here the volume around vertex :math:`v_1` is shown) are obtained by connecting the cell centers with midpoints of edges. Each cell is characterized by the sets of its vertices :math:`V(c)` which is :math:`(v_1,v_2,v_3)` for :math:`c=c_1` and the set of its nearest neighbors :math:`N(c)`. For :math:`c=c_1`, :math:`N(c)` includes :math:`c_2`, :math:`c_6` and a triangle (not shown) across the edge formed by :math:`v_2` and :math:`v_3`. 
One can also introduce :math:`C(v)` which is :math:`(c_1,c_2,c_3,c_4,c_5,c_6)` for :math:`v=v_1`, and other possible sets. Edge :math:`e` (right panel) is characterized by the set of its vertices :math:`V(e)=(v_1,v_2)` and the ordered set of cells :math:`C(e)=(c_1,c_2)` with :math:`c_1` on the left. The edge vector :math:`{\bf l}_e` connects vertex :math:`v_1` to vertex :math:`v_2`. The edge cross-vectors :math:`{\bf d}_{ec_1}` and :math:`{\bf d}_{ec_2}` connect the edge midpoint to the respective cell centers. + + +In the vertical direction, the horizontal velocities and scalars are +located at mid-levels. The velocities of inter-layer exchange (vertical velocities for flat layer surfaces) are located at full layers and at scalar points. :numref:`vertical` illustrates this arrangement. + +The layer thicknesses are defined at scalar locations (to be consistent with the elevation). There are also auxiliary layer thicknesses at the horizontal velocity locations. They are interpolated from the vertex layer thicknesses. + +.. _vertical: +.. figure:: img/fig_vertical.png + + Schematic of vertical discretization. The thick line represents the bottom, the thin lines represent the layer boundaries and vertical faces of prisms. The location of variables is shown for the left column only. The blue circles correspond to scalar quantities (temperature, salinity, pressure), the red circles to the horizontal velocities and the yellow ones to the vertical exchange velocities. The bottom can be represented with full cells (three left columns) or partial cells (the next two). The mesh levels can also be terrain following, and the number of layers may vary (the right part of the schematic). The layer thickness in the ALE procedure may vary in prisms above the blue line. The height of prisms in contact with the bottom is fixed. + + +The cell-vertex discretization of FESOM2 can be viewed as an analog of an Arakawa B-grid (see also below) while that of FESOM1.4 is an analog of A-grid. 
The cell-vertex discretization is free of pressure modes, which would be excited on the A-grid unless stabilized. However, the cell-vertex discretization allows spurious inertial modes because of excessively many degrees of freedom used to represent the horizontal velocities. They need to be filtered by the horizontal viscosity. Triangular A and B grids work on arbitrary triangular meshes in contrast to C-grids which require orthogonal meshes. + +Notation +======== + +For convenience of model description we introduce the following notation. +Quantities defined at cell centroids will be denoted with the lower index :math:`c`, and the quantities at vertices will be denoted with the lower index :math:`v`. The vertical index :math:`k` will appear as the first index, but it will be suppressed if this does not lead to ambiguities. The agreement is that the layer index increases downwards. The indices may appear in pairs or in triples. Thus the pair :math:`kc` means the vertical layer (or level for some quantities) :math:`k` and cell :math:`c`, and the triple :math:`kcv` means that the quantity relates to layer (level) :math:`k`, cell :math:`c` and vertex :math:`v` of this cell. We use the notation :math:`C(v)` for the set of cells that contain vertex :math:`v`, :math:`V(c)` for the set of vertices of cell :math:`c`, :math:`E(v)` for the set of edges emanating from vertex :math:`v` and so on. Each edge :math:`e` is characterized by its vertices :math:`V(e)`, the neighboring cells :math:`C(e)`, the length vector :math:`{\bf l}_e` directed from the first vertex in :math:`V(e)` to the second one and two cross-edge vectors :math:`{\bf d}_{ec}` directed from the edge center to the cell center of the left and right cells respectively (see :numref:`labelgeometry`). The cells in the set :math:`C(e)` are ordered so that the first one is on the left of the vector :math:`{\bf l}_e`. The boundary edges have only one (left) cell in the set :math:`C(e)`. 
The total number of vertices, cells and edges will be denoted as :math:`V, C, E` respectively. + + +Earth sphericity +================ + +We use spherical coordinate system with the north pole displaced to Greenland (commonly 75°N, 50°W). A local Carthesian reference frame is used on each cell with cellwise-constant metric coefficients (cosine of latitude). Gradients of scalar quantities and cell areas are computed with respect to local coordinates. The vectors :math:`{\bf d}_{ec}` are stored in local physical measure of respective cells :math:`c` for they always enter in combination with velocity (defined on cells) to give normal transports. Vectors :math:`{\bf l}_e` are stored in radian measure. Whenever their physical length is required, it is computed based on the mean of cosines on :math:`C(e)`. We will skip other details of spherical geometry (metric terms in momentum advection etc.) and ignore the difference in the representation of :math:`{\bf l}_e` (radian measure) and :math:`{\bf d}_{ec}` (physical measure) for brevity below. The :math:`x` and :math:`y` directions should be understood as local zonal and meridional directions. + +In contrast to regular-mesh models there is no cyclic boundary in FESOM meshes which commonly cover the entire ocean. + + +Bottom representation +===================== + +The bottom topography is commonly specified at scalar points because the elevation is defined there. However, for discretizations operating with full velocity vectors, this would imply that velocity points are also at topographic boundaries. In this case the only safe option is to use the no-slip boundary conditions, similar to the traditional B-grids. To avoid this constraint, we use the cellwise representation of bottom topography (same as in FESOM1.4). In this case velocity points never touch bottom and both no-slip and free slip boundary conditions are possible. 
Boundary conditions are implemented through ghost cells which are obtained from the boundary elements by reflection with respect to the boundary face (edge in 2D). + +The drawback of the elementwise bottom representation is that the total thickness is undefined at scalar points if the bottom is stepwise (geopotential vertical coordinate). The motion of level surfaces of the ALE vertical coordinate at each scalar location is then limited to the layers that do not contact the bottom topography (above the blue line in :numref:`vertical`). This is related to the implementation of partial cells which is much simpler if the thickness of the bottom layer stays fixed. +The layer thickness :math:`h_{kv}` is dynamically updated at scalar points (vertices) in the layers that are affected by the ALE algorithm and interpolated to the cells + +.. math:: + h_{kc}=(1/3)\sum_{v\in V(c)}h_{kv}. + + +The cell thicknesses :math:`h_{kc}` enter the discretized equations as the products with horizontal velocities. + +Because of cell-wise bottom representation, triangular prisms pointing into land (two lateral faces touch the land) may occur at certain levels on *z*-coordinate meshes even though such prisms were absent along the coast. Such prisms lead to instabilities in practice and have to be excluded. The opposite situation with land prisms pointing into the ocean is much less dangerous, yet it is better to avoid it too. We adjust the number of layers under each surface triangle at the stage of mesh design to exclude such potentially dangerous situations. This issue is absent in FESOM1.4 because of the difference in the placement of horizontal velocities and the necessity to use no-slip boundary conditions. Since the number of cells is nearly twice as large as the number of vertices, the cell-wise bottom representation may contain more detail than can be resolved by the field of vertical velocity. This may trigger extra noise in layers adjacent to the bottom. 
+ +Partial cells +============= + +Partial cells on *z*-coordinate meshes are naturally taken into account in the ALE formulation (see below) because it always deals with variable layer thicknesses (heights of prisms). If :math:`K_{c}` is the number of layers under cell :math:`c`, we define + +.. math:: + K_{v}^+=\max_{c\in C(v)}K_{c},\quad K_{v}^-=\min_{c\in C(v)}K_{c}. + + +If the layer thickness are varied in the ALE procedure, this is limited to :math:`K_{v}^--1` layers. With this agreement, the thickness of the lowest layer on cells is kept as initially prescribed. In this case the implementation of partial cells reduces to taking the thicknesses of the lowest layers on cells as dictated by the bottom topography unless they are too thick (the real depth is deeper than the deepest standard level by more than half thickness of the last standard layer), in which case we bound them. The heights of scalar control prisms in the layers below :math:`K_{v}^-` are formally undefined, but their volumes are strictly defined, and thicknesses can be considered as the volume-mean ones if needed. Scalar and vector quantities defined at mid-layers are kept at their standard locations. This avoids creating spurious pressure gradients. The partial cells then work through the modified transports crossing the faces of control volumes. Since the horizontal velocities are located at cells, the transports entering scalar control volumes are uniquely defined. For vector control volumes the areas of vertical faces may be different on two prisms meeting through the face. Taking the minimum area to compute fluxes is the safest option in this case. + + diff --git a/docs/getting_started/getting_started.rst b/docs/getting_started/getting_started.rst new file mode 100644 index 000000000..c544a1b7c --- /dev/null +++ b/docs/getting_started/getting_started.rst @@ -0,0 +1,423 @@ +.. 
_chap_getting_started: + +Getting Started with FESOM2 +*************************** + +This chapter describes several ways of getting started with FESOM2. First we show a minimum set of commands that will lead to a working setup on systems where FESOM2 is used actively. We also have instructions for Docker/Singularity and Ubuntu. + +TL;DR version for supported HPC systems +======================================= + +Supported systems are: generic ``ubuntu``, ``ollie`` at AWI, ``mistral`` at DKRZ, ``JURECA`` at JSC, ``HLRN``, ``Hazel Hen``, ``MareNostrum 4`` at BSC. During configuration the system will be recognised and appropriate environment variables and compiler options should be used. +:: + + git clone https://github.com/FESOM/fesom2.git + cd fesom2 + bash -l configure.sh + +Create file fesom.clock in the output directory with the following content (if you plan to run with COREII forcing): + +:: + + 0 1 1948 + 0 1 1948 + +after that one has to adjust the run script for the target system and run it: +:: + + cd work + sbatch job_ollie + +Detailed steps of compiling and running the code +================================================ + +The following section assumes you are located on one of the supported HPC systems. To install FESOM2 on your local machine we recommend using `Docker based installation`_ and reading about `Necessary Ubuntu packages`_ if you decide not to use Docker. + +First thing is to check out FESOM2 code from the repository. The code is developed in an open repository on GitHub_. + +.. _GitHub: https://github.com/FESOM/fesom2/ + +Build model executable with CMake +--------------------------------- + +Clone the GitHub repository with a git command: + +:: + + git clone https://github.com/FESOM/fesom2.git + + +The repository contains model code and two additional libraries: `Metis` (domain partitioner) and `Parms` (solver), necessary to run FESOM2. To build the FESOM2 executable one has to compile the Parms library and the code of the model (`src` folder). 
In order to build the executable that is used for model domain partitioning (distribution of the model mesh between CPUs) one has to compile the `Metis` library and also some code located in the src directory (see :ref:`partitioning`). Building of the model executable and the partitioner is usually done automatically with the use of CMake. If you are going to build the code not on one of the supported platforms (ollie, DKRZ, HLRN, and HAZELHEN, general Ubuntu), you might need to do some (usually small) modifications described in the `Adding new platform for compilation`_ section. + +Change to the `fesom2` folder and execute: + +:: + + bash -l ./configure.sh + +In the best case scenario, your platform will be recognized and the Parms library and model executable will be built and copied to the bin directory. If something goes wrong, have a look at the Troubleshooting_ section. + +If you would like to select the platform manually (which is necessary in the case of Ubuntu, for example), type: + +:: + + bash -l ./configure.sh ubuntu + + +Data and mesh files +------------------- + +The FESOM2 repository contains only very small example meshes and data (in the ``test`` directory, see the note below). However, if you want to run realistic simulations, you either have to have them on your system, or download an archive with sample data. There is a chance that your system already has some of the necessary files, you can check it in the ``setups/paths.yml`` file. If not, the easiest way to start is to download an example set from `DKRZ cloud`_ (12 Gb) by executing: + +:: + + curl https://swift.dkrz.de/v1/dkrz_035d8f6ff058403bb42f8302e6badfbc/FESOM2.0_tutorial/FESOM2_one_year_input.tar > FESOM2_one_year_input.tar + +and untar: + +:: + + tar -xvf FESOM2_one_year_input.tar + +You will have a folder named ``FESOM2_one_year_input`` that contains all the data you need to do an initial run of the model. The `mesh` directory contains two meshes: ``pi`` and ``core2``. 
The ``pi`` mesh is a very small global FESOM2 mesh that can run relatively fast even on a laptop. The ``CORE`` mesh is our 1 degree equivalent mesh and is used in many tuning and testing studies. Mesh folders already include several prepared partitionings (``dist_`` folders), so you don't have to worry about partitioning during your first steps with FESOM. + +The ``input`` folder contains files with initial conditions (``phc3.0``) and atmospheric forcing (``JRA55``) for one year (1958). + +.. note:: You can find more standard FESOM2 meshes in https://gitlab.awi.de/fesom . Download instructions are available in each mesh repository. + + +.. _DKRZ cloud: https://swiftbrowser.dkrz.de/download/FESOM2.0_tutorial/FESOM2_one_year_input.tar + +.. note:: The FESOM2 distribution contains a minimal set of data to run the model in the ``test`` directory, namely ``pi`` and ``soufflet`` (channel) meshes, WOA13 initial conditions and CORE2 forcing data for one day. Those are mainly used for testing, and require a bit more involved modification of namelists. For more details see instructions on `Docker based installation`_. 
+ +The next step is to make some changes in the model configuration. All runtime options can be set in the namelists that are located in the config directory: + +:: + + cd ../config/ + +There are several configuration files, but we are only interested in the ``namelist.config`` for now. The options that you might want to change for your first FESOM2 run are: + +- ``run_length`` length of the model run in run_length_unit (see below). +- ``run_length_unit`` units of the run_length. Can be ``y`` (year), ``m`` (month), ``d`` (days), ``s`` (model steps). +- ``MeshPath`` - path to the mesh you would like to use (e.g. ``/youdir/FESOM2_one_year_input/mesh/pi/``, slash at the end is important!) +- ``ClimateDataPath`` - path to the folder with the file with model temperature and salinity initial conditions (e.g. ``/youdir/FESOM2_one_year_input/input/phc3.0/``). The name of the file with initial conditions is defined in `namelist.oce`, but during first runs you probably don't want to change it. + +More detailed explination of options in the ``namelist.config`` is in the section :ref:`chap_general_configuration`. + +Running the model +----------------- + +Change to the ``work`` directory. You should find several batch scripts that are used to submit model jobs to different HPC machines. The scripts also link ``fesom.x`` executable to the ``work`` directory and copy namelists with configurations from config folder. + +.. note:: + Model executable, namelists and job script have to be located in the same directory (usually ``work``). + +If you are working on AWI's ``ollie`` supercomputer, you have to use ``job_ollie``, in other case use the job script for your specific platform, or try to modify one of the existing ones. + +.. note:: + One thing you might need to adjust in the job files is the number of cores, you would like to run the model on. 
For example, for SLURM it will be adjusting ``#SBATCH --ntasks=288`` value, and for the simple ``mpirun`` command that we have for ``job_ubuntu`` it will be the argument for the ``-n`` option. It is necessary that your mesh has the corresponding partitioning (``dist_xx`` folder, where ``xx`` is the number of cores). + +On ``ollie`` the submission of your job is done by executing the following command: + +:: + + sbatch job_ollie + +The job is then submitted. In order to check the status of your job on ollie you can execute: + +:: + + squeue -u yourusername + +Results of the model run should appear in the ``results`` directory that you have specified in the ``namelist.config``. After the run is finished the ``fesom.clock`` file (or if you change your runid, ``runid.clock``) will be updated with information about the time of your run's end, which allows running the next time portion of the model experiment by just resubmitting the job with ``sbatch job_ollie``. + +Other things you need to know earlier on +======================================== + +The clock file +-------------- + +The clock file is located in your output directory (specified in ``ResultPath`` option of ``namelist.config``) and controls the time. At the start of a new experiment that we want to initialize from climatology (a so-called cold start), the ``fesom.clock`` file would usually look like this: + +:: + + 0 1 1958 + 0 1 1958 + +In this example, ``1958`` is the first available year of the atmospheric ``JRA55`` forcing. The two identical lines tell the model that this is the start of the experiment and that there is no restart file to be read. Also make sure that the ``yearnew`` option of the ``namelist.config`` is set to the year you would like the cold start to begin (1958 in this case). + +Let's assume that we run the model with a timestep of 30 minutes (= 1800 seconds) for a full year (1958). 
After the run is successfully finished, the clock file will then automatically be updated and look like this: + +:: + + 84600.0 365 1958 + 0.0 1 1958 + +where the first row is the second of the day of the last time step of the model, and the second row gives the time when the simulation is to be continued. The first row indicates that the model ran for 365 days (in 1958) and 84600 seconds, which is ``1 day - 1`` FESOM timestep in seconds. In the next run, FESOM2 will look for restart files for the year 1958 and continue the simulation at the 1st of January in 1959. + + +Tricking FESOM2 into accepting existing restart files +----------------------------------------------------- +The simple time management of FESOM2 allows to easily trick FESOM2 to accept existing restart files. Let's assume that you have performed a full ``JRA55`` cycle until the year 2019 and you want to perform a second cycle, restarting from the last year of the first cycle. This can be done by (copying and) renaming the last year into: + +:: + + mv fesom.2019.ice.nc fesom.1957.ice.nc + mv fesom.2019.oce.nc fesom.1957.oce.nc + +by changing the clock file into: + +:: + + 84600.0 365 1957 + 0.0 1 1958 + +In case the second cycle starts again at the very first year (e.g. 1958 in ``JRA55``) of the forcing, namelist.config needs to be modified, otherwise the model will always perform a cold start in 1958 instead of restarting from the 1957 restart files: + +:: + + &clockinit + timenew=0.0 + daynew=1 + yearnew=1957 + + + +.. _partitioning: + +Build partitioner executable +---------------------------- + +First meshes you will use probably will come with several predefined partitionings (``dist_XXXX`` folders). However at some point you might need to create partitioning yourself. To do so you have to first compile the partitioner. 
First you change to the ``mesh_part`` directory: + +:: + + cd mesh_part + +if you work on one of the supported systems, you should be able to execute: + +:: + + bash -l ./configure.sh + +or, in the case of Ubuntu or another custom-defined system: + +:: + + bash -l ./configure.sh ubuntu + +The ``cmake`` should build the partitioner for you. If your system is not supported yet, have a look at how to add a custom system in `Adding new platform for compilation`_. The executable ``fesom_ini.x`` should now be available in the ``bin`` directory. Now you can proceed with `Running mesh partitioner`_. + + +Running mesh partitioner +------------------------ + +You have to do this step only if your mesh does not have partitioning for the desired number of cores yet. You can tell whether the partitioning exists by the presence of the ``dist_XXXX`` folder(s) in your mesh folder, where XXXX is the number of CPUs. If the folder contains files with partitioning, you can just skip this step. + +Partitioning is going to split your mesh into pieces that correspond to the number of cores you are going to request. Currently FESOM2 scales down to about 300 vertices per core; a further increase in the number of cores will probably have a relatively small effect. + +In order to tell the partitioner how many cores you need the partitioning for, one has to edit the ``&machine`` section in the ``namelist.config`` file (see also :ref:`chap_general_configuration`). There are two options: ``n_levels`` and ``n_part``. FESOM mesh can be partitioned with use of several hierarchy levels and ``n_levels`` defines the number of levels while ``n_part`` defines the number of partitions on each hierarchy level. The simplest case is to use one level and ``n_part`` just equal to the number of cores, and we recommend using it at the beginning: + +:: + + n_levels=1 + n_part= 288 + +This will prepare your mesh to run on 288 computational cores. + +In order to run the partitioner change to the ``work`` directory. 
You should find several batch scripts that are used to submit partitioner jobs to HPC machines (have ``_ini_`` in their names). The scripts also link the ``fesom_ini.x`` executable to the ``work`` directory and copy namelists with configurations from the ``config`` folder (for the partitioner we actually need only ``namelist.config``, but the scripts copy everything). + +.. note:: + For the partitioner to run, the ``fesom_ini.x`` executable, configuration namelists (in particular ``namelist.config``) and job script have to be located in the same directory (usually ``work``). + +If you are working on AWI's ``ollie`` supercomputer, you have to use ``job_ini_ollie``; otherwise use the job script for your specific HPC platform, or try to modify one of the existing ones. For relatively small meshes (up to 1M nodes) and small partitions it is usually fine just to run the partitioner on a login node (it is serial anyway), like this: + +:: + + ./fesom_ini.x + +.. note:: + Make sure that you have the same environment that was used during compilation of ``fesom_ini.x``. Usually the easiest way to do this is to first (example for ``ollie`` platform):: + + source ../env/ollie/shell + + + This file (``shell``) is used to set up the environment during the compilation of both ``fesom_ini.x`` and ``fesom.x``. + +If you are trying to partition a large mesh, then on ``ollie`` for example the submission of your partitioning job is done by executing the following command: + +:: + + sbatch job_ini_ollie + + +Model spinup / Cold start at higher resolutions +----------------------------------------------- + +Cold start of the model at high mesh resolutions with standard values for timestep and viscosity will lead to instabilities that cause the model to crash. 
If no restart files are available and a spinup has to be performed, the following changes should be made for the first month-long simulation and then adjusted gradually over the next 6-8 months: + +- The first thing to try, which usually helps, is to set in the ``namelist.oce``:: + + w_split=.true. + +- Try to reduce the timestep in ``namelist.config``, for example to: + + :: + + step_per_day=720 + + or even lower (e.g. a value of 1440 will lead to a 1 minute timestep). + +.. note:: + Make sure that for the high resolution runs (with mesh resolution over considerable portions of the domain finer than 25-10 km) you don't use the combination of default "Easy Backscatter" viscosity (``visc_option=5``) and ``easy_bs_return= 1.5``. This is true not only for the spinup, but for the whole duration of the run. The "Easy Backscatter" option works very well on low resolution meshes, but for high resolution meshes (eddy resolving) it does more harm than good. If you would like to use ``visc_option=5`` for high resolution runs, put ``easy_bs_return= 1.0``. + + +- In ``namelist.oce`` make sure that ``visc_option`` is set to 7 or 5 (see also the note above about option 5) and increase ``gamma1`` to something like: + + :: + + gamma1=0.8 + + +or even higher. After running for about a month try to reduce it. If you change the values of run length and restart output frequency (which you probably want to do during the spinup, to run for short periods), don't forget to change them back in the ``namelist.config``: + +:: + + run_length= 1 + run_length_unit='m' + ... + restart_length=1 + restart_length_unit='m' + +Increase the timestep gradually. Very highly resolved meshes may require an initial timestep of one to two minutes or even less. + +Adding new platform for compilation +----------------------------------- + +In order to add a new platform for compilation, you simply have to specify the computational environment. In the simplest case this requires: + +- To edit the ``env.sh`` file. 
+- To add a folder with the name of the platform to the ``env`` folder and put the ``shell`` file with environment setup. + +In the ``env.sh`` file you have to add one more ``elif`` statement into the ``if`` control structure, where the platform (let's call it ``mynewhost``) is selected:: + + elif [[ $LOGINHOST = mynewhost ]]; then + STRATEGY="mynewhost" + +As you can see in the ``env.sh`` file some host systems are automatically identified by using regular expressions, but the simplest way is just to explicitly provide the name of the host system. + +The next step is to create an additional folder in the ``env`` folder:: + + mkdir ./env/mynewhost + +and add a file with the name ``shell`` to it. This file will be sourced before the compilation, so you can set up the environment (bash syntax) in it. Please have a look at the ``shell`` file in other folders for examples. Now you should be able to do:: + + bash -l ./configure.sh mynewhost + +to do the compilation. + +If you are lucky this will be everything you need. However, in more complicated cases one has to adjust CMake files (``CMakeLists.txt`` located in folders), so knowledge of CMake is required. + +Change compiler options +----------------------- + +Compiler options for FESOM2 code can be changed in the ``./src/CMakeLists.txt`` file. Currently the definition of compiler options for the Intel compiler looks like:: + + if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip -init=zero) + +At present only Intel and GNU compilers are supported, but the user can relatively easily add options by following the same pattern. 
+ + +Troubleshooting +=============== + +Error ``can not determine environment for host:`` +------------------------------------------------- + +If you are on an Ubuntu system, add ``ubuntu`` as an input parameter for ``configure.sh``: + +:: + + ./configure.sh ubuntu + +Otherwise you have to add another system - have a look at the `Adding new platform for compilation`_ section. + +Model blows up +-------------- + +There could be many reasons for this, but the first thing to try is to reduce the time step and/or increase the model viscosity for a short period of time. Have a look at `Model spinup / Cold start at higher resolutions`_ for instructions. + + +Docker based installation +========================= + +The best way to run the model locally is to use a Docker container. You obviously have to have Docker installed for your system. The Docker image we are going to use has all necessary libraries installed plus the ``mkrun`` python script (`Docker file`_), which helps to create FESOM2 configurations. As a result of the steps below, you will run the ``pi`` mesh for one day using data files that come with the model. + +.. _Docker file: https://github.com/FESOM/FESOM2_Docker/tree/master/fesom2_test + +- Get the image:: + + docker pull koldunovn/fesom2_test:fesom2.1 + +- Go to the folder with your version of the fesom2 folder (NOT inside fesom2 folder, one up, the one you run ``git clone https://github.com/FESOM/fesom2.git`` in). +- Run:: + + docker run -it -v "$(pwd)"/fesom2:/fesom/fesom2 koldunovn/fesom2_test:fesom2.1 /bin/bash + +- This should get you inside the container. You can now edit the files in your fesom2 folder (on the host system), but compile and run the model inside the container. 
+- When inside the container, to compile do: + + :: + + cd fesom2 + bash -l configure.sh ubuntu + +- To prepare the run (this will do the test with pi mesh):: + + mkrun pi test_pi -m docker + +- To run the model: + + :: + + cd work_pi/ + ./job_docker_new + +As a next step you can modify the setup in ``work_pi`` to try different parameters. You can also follow the steps described in `Detailed steps of compiling and runing the code`_. To make your life a bit easier place ``FESOM2_one_year_input`` in the ``fesom2`` folder, so that the data are available inside the container. You also can generate setup that would use ``JRA55`` forcing, and adjust it - this will save you some time on editing ``namelist.forcing``, since original setup in ``work_pi`` folder use old ``CORE2`` forcing. + + :: + + mkrun pi_jra55 test_pi -m docker -f JRA55 + +Necessary Ubuntu packages +========================= + +Here is the list of packages you need to install on ``Ubuntu`` to compile and run FESOM2. Should work (with adjustments for package managers and names) for other linux distributions. + + :: + + apt-get -y install make gfortran gcc g++ libblas-dev libopenmpi-dev + apt-get -y install cmake vim git libnetcdf-dev libnetcdff-dev libpmi2-pmix + + diff --git a/docs/icepack_in_fesom.rst b/docs/icepack_in_fesom.rst new file mode 100644 index 000000000..3c48191f4 --- /dev/null +++ b/docs/icepack_in_fesom.rst @@ -0,0 +1,161 @@ +.. _icepack_in_fesom: + +Icepack sea ice configuration +***************************** + +This section describes the implementation of the Icepack sie ice column physics package in the FESOM2 model. The scope of the following paragraphs is to provide a practical guide to users interested in detailed simulations of the sea ice system with FESOM2, and not to describe the scientific features of Icepack. 
A detailed description of the sea ice parameterizations here implemented can be found on the website of the `CICE Consortium `_, which maintains and continuously develops this package. + +.. attention:: + The Icepack implementation in FESOM2 is still in a testing phase and we cannot guarantee bug-free code or good scientific results. + +.. note:: + To get more information regarding the implementation of Icepack in FESOM2, to report bugs, or to get advice regarding the model setup do not hesitate to open an issue on the FESOM2 GitHub repository or to contact Lorenzo Zampieri at lorenzo(dot)zampieri(at)awi(dot)de. + + You are invited to update and further develop this documentation by pushing your changes to the `FESOM2 Documentation repository `_ on GitHub. + +General information +=================== + +Icepack–the column physics package of the sea-ice model CICE–is a collection of physical parameterizations that account for thermodynamic and mechanical sub-grid processes not explicitly resolved by the hosting sea-ice model, in our case FESOM2. The modular implementation of Icepack allows the users to substantially vary the complexity of the sea-ice model, with the possibility of choosing between several schemes and a broad set of active and passive tracers that describe the sea-ice state. Icepack v1.2.1 has been implemented in FESOM2 and can be used as an alternative to the standard FESIM thermodynamic module. As in the standard FESIM implementation, the Icepack column-physics subroutines run every ocean time step. All the Icepack variables are defined directly on the nodes of the FESOM2 mesh, ensuring an optimal consistency between the ocean and the sea-ice components of the model. The inclusion of Icepack in FESOM2 required a revision of the calling sequence within the sea-ice model, which now follows that of the CICE model as illustrated in :numref:`call_seq`. + +.. _call_seq: +.. 
figure:: img/call_seq.png + + Schematic describing the calling sequences of the Standard FESOM2 and FESOM2-Icepack implementations. + +Icepack is licensed for use through the CICE Consortium. Therefore, we encourage the FESOM2 userbase interested in the Icepack features to be aware of the `License `_ when working with this model configuration. We report here a disclaimer from the `Icepack website `_. + +.. important:: + Icepack releases are “functional releases” in the sense that the code runs, does not crash, passes various tests, and requires further work to establish its scientific validity. In general, users are not encouraged to use any of the CICE Consortium’s model configurations to obtain “scientific” results. The test configurations are useful for model development, but sea ice models must be evaluated from a physical standpoint in a coupled system because simplified configurations do not necessarily represent what is actually happening in the fully coupled system that includes interactive ocean and atmosphere components. + +How to cite +""""""""""" + +The current Icepack version implemented in FESOM2 is Icepack 1.2.1. To acknowledge the development work behind the implementation of Icepack in FESOM2 please cite `Zampieri et al. (2021) `_, part of which used to compile this documentation, and `Hunke et al. (2020) `_, in addition to the usual FESOM2 papers. + +Implementation +============== + +The implementation of Icepack in FESOM2 is fully modular, meaning that the users are free to vary the configuration via namelist parameters. When Icepack is used, ``namelist.icepack`` controls all settings related to the sea ice subgrid parameterizations, thus overriding the content of ``namelist.ice``. The dynamics (EVP) and advection schemes are still controlled by the standard ``namelist.ice``. Below we describe some of the most important namelist parameters, while we recommend consulting the `official Icepack documentation `_ for a more comprehensive description. 
+ +Namelist section &env_nml +""""""""""""""""""""""""" + +- **nicecat** Defines the number of sea ice thickness categories. +- **nfsdcat** Defines the number of categories of the floe size distribution. This parameter should be set to 1 as the floe size distribution has never been tested in FESOM2. +- **nicelyr** and **nsnwlyr** Defines the number of vertical layers in sea ice and snow. + +.. attention:: + Increasing substantially the number of thickness classes and vertical layers can lead to numerical instabilities (very thin vertical layers), memory issues, very large output files, and finally to a substantial slow down of the model because of the high number of tracers that need to be advected. + +Namelist section &grid_nml +"""""""""""""""""""""""""" + +- **kcatbound** Specifies which criteria is followed to discretize the Ice Thickness Distribution (ITD). Setting **kcatbound** equal to 0, 1, or 3 gives lower thickness boundaries for any number of thickness categories. Setting **kcatbound=2** corresponds to the World Meteorological Organization ITD classification, and it is compatible only with **nicecat=5,6,7**. + +Namelist section &tracer_nml +"""""""""""""""""""""""""""" + +Logical parameters to specify parameterizations and passive tracers. Only **tr_pond_cesm** has been tested extensively. + +Namelist section &nml_list_icepack +"""""""""""""""""""""""""""""""""" + +It regulates the type, frequency, and precision of the output for Icepack variables. Most of the Icepack variables can be defined as average over the grid cell (e.g. **aice**: average sea ice area fraction – 2D variable), or separately for each thickness class (e.g. **aicen**: sea ice area fraction in each thickness class – 3D variable), with the ITD information saved as a vertical dimension in the netCDF file. At the moment, variables defined over multiple vertical layers are output in separated files. 
For example, in a model configuration with **n** sea ice vertical layers, activating the **qice** output stream will lead to **n** files where ``qice_i.fesom.yyyy.nc`` contains the sea ice enthalpy of the **i**-*th* vertical layer averaged over the ITD. Similarly, activating the **qicen** output stream will lead to **n** files where ``qicen_i.fesom.yyyy.nc`` contains the sea ice enthalpy of the **i**-*th* sea ice vertical layer for each thickness class. + +Compatibility with FESOM2 configurations +"""""""""""""""""""""""""""""""""""""""" + +In `Zampieri et al. (2020) `_ the model was run with linear free surfaces (**which_ALE=’linfs’**), and other ALE coordinates have not been tested. In principle, Icepack should be independent of the scheme used to solve the sea ice dynamics. However, at the moment only the standard EVP is supported, while the mEVP and aEVP still show some strange behaviors. We are working on solving this issue as well as on testing further setups, and we will update this document as soon as progress is made. + +Compilation +=========== + +Compiling FESOM2 with Icepack is very easy if you are already used to the FESOM2 workflow. After cloning fesom2 from the GitHub repository, download the Icepack single column package: + +:: + + cd src/icepack_drivers/ + bash -l download_icepack.sh + +The next step is to activate the Icepack flag in ``CMakeLists.txt`` by setting **USE_ICEPACK** from **OFF** to **ON**. At this point, you can proceed with the usual compilation via + +:: + + bash -l configure.sh + +The compilation of this FESOM2 version with the ESM Tools is not yet supported. + +Running the model +================= + +Running FESOM2 with Icepack is not different from the standard case. Make sure to add the ``namelist.icepack`` file to your ``work`` directory. Two diagnostic files are generated in addition to the standard ``fesom2.0.out``. 
``icepack.diagnostics`` contains information about the Icepack configuration such as the value of some parameters, the tracers employed, and the boundaries of the ITD. ``icepack.errors`` possibly contains diagnostic information about errors in Icepack that can occur during the model run. Information about the running time are given in ``fesom2.0.out`` with the usual division in **dynamics**, **thermodynamics**, and **advection**. + +The model output is saved in the result folder together with the standard ocean output. Note that outputting sea ice information using the standard FESIM variables (**a_ice**, **m_ice**, **m_snow**, etc.) is still possible also when using Icepack. These variables are consistent with the Icepack sea ice description (**a_ice** = **aice**, **m_ice** = **vice**, **m_snow** = **vsno**). An additional restart file is generated for Icepack, ``fesom.yyyy.icepack.restart.nc``, and it is written with the same frequency as ``fesom.yyyy.oce.restart.nc`` and ``fesom.yyyy.ice.restart.nc``. + +.. attention:: + Restarting the model after changing the number of ice thickness classes, the vertical discretization of ice and/or snow, and the number of passive tracers is currently not possible. Also, changing the thermodynamic and melt pond schemes during the run is not recommended. In these cases consider a cold start and repeat your spinup run. + +Code structure +============== + +Icepack is a single column model and therefore its subroutines act on one grid cell. The Icepack code is downloaded from a separate repository (see instructions on how to compile the model) and is located in ``src/icepack_drivers/Icepack/columnphysics/``. To integrate this code in a host General Circulation Model (GCM), in our case FESOM2, additional instructions are needed to define an interface between the two systems and to drive the Icepack subroutines. 
This interface is contained in the ``src/icepack_drivers/icedrv_*.F90`` files, which are part of the FESOM2 repository, and will be briefly described in the following section. + +Icepack drivers +""""""""""""""" + +- ``icedrv_main.F90`` This file contains the main module of the Icepack drivers. All the variables are declared here, together with the interface of the subroutines contained in various submodules. If new variables or subroutines need to be added to the code, this is a good place to start. Try to maintain all the variables private to increase the modularity of the code, and use the transfer interface to exchange variables with FESOM2. + +- ``icedrv_set.F90`` This file contains few subroutines that initialize the model parameters by reading the Icepack namelists or alternatively by extracting default values from the Icepack package. Furthermore, ``icepack.diagnostics`` is written here, and the sea ice state is initialized in case of a cold start of the model. + +- ``icedrv_allocate.F90`` This file contains subroutines that allocate the Icepack variables declared in ``icedrv_main.F90``. + +- ``icedrv_init.F90`` This file contains subroutines that initialize the Icepack variables declared in ``icedrv_main.F90`` and allocated in ``icedrv_allocate.F90``. + +- ``icedrv_step.F90`` This file contains few subroutines that describe the calling sequence of the sea ice model when Icepack is used in FESOM2. + +- ``icedrv_advection.F90`` This file contains few subroutines that advect the Icepack tracers. If new parameterization or options are explored, you should check if the relative tracers are advected properly. + +- ``icedrv_transfer.F90`` This file contains subroutines that describe the procedure to pass information between FESOM2 and Icepack. + +- ``icedrv_io.F90`` This file contains subroutines that describe the I/O streams for the Icepack variables, including restart procedures. 
If new parameterization or options are explored, you should check if the relative tracers are restarted properly. + +- ``icedrv_kinds.F90`` This file declares some standard types for variable declarations. + +- ``icedrv_system.F90`` This file contains subroutines that handle model errors inside Icepack, possibly stopping the model run, and that output warning messages when appropriate. + +- ``icedrv_constants.F90`` This file defines some constants that are used in the Icepack drivers. + +Communication between Icepack and FESOM2 +"""""""""""""""""""""""""""""""""""""""" + +The Icepack environment is separated from the rest of FESOM2 and consists of a single big module with multiple submodules. Almost all the variables are private and are not visible by the FESOM2 code. The variables exchange between Icepack and FESOM2 takes place through the passing subroutines ``fesom_to_icepack`` and ``icepack_to_fesom``. + +Frequently asked questions +========================== + +Should I use Icepack for my simulations? +"""""""""""""""""""""""""""""""""""""""" + +It depends on your scientific questions. Icepack might be a good option if you are interested in sea ice processes in polar regions. In principle, the employment of Icepack should not negatively affect the ocean state but could make FESOM2 slower. + +Is FESOM2 slower when run with Icepack? +""""""""""""""""""""""""""""""""""""""" + +Yes, the model integration is slower for two reasons: 1. The sea ice subgrid parameterizations are more complex compared to the standard FESIM. 2. Much more sea-ice tracers need to be advected. Overall, the sea ice component of FESOM2 becomes approximately four times slower with Icepack. Including additional output related to a more complex sea ice description can also contribute to deteriorating the model performances. + +Which EVP scheme should I use with Icepack? 
+"""""""""""""""""""""""""""""""""""""""""" + +In principle, Icepack should be independent of the scheme used to solve the sea ice dynamics. However, at the moment only the standard EVP is supported, while the mEVP and aEVP still exhibit some strange behaviors. We are working on solving this issue and we will update this document as soon as progress is made. + +Can Icepack be configured as the standard FESIM? +"""""""""""""""""""""""""""""""""""""""""""""""" + +Yes, in principle it is possible to run Icepack with a single thickness class and with the 0-layer thermodynamics. However, the results obtained during the testing phase with this configuration were not very convincing and they seemed not compatible with the standard FESOM2 results. More investigations are needed to understand the cause of this behavior, which is likely related to a different implementation of the thermodynamic processes in the model. + +Can Icepack be used in coupled configurations? +"""""""""""""""""""""""""""""""""""""""""""""" + +No, at the moment FESOM2 with Icepack has not been coupled with atmospheric models. A coupling with OpenIFS is planned and might be released in the upcoming months. + +Can Icepack be used with data assimilation? +""""""""""""""""""""""""""""""""""""""""""" + +No, at the moment FESOM2 with Icepack has not been equipped with data assimilation capabilities. 
diff --git a/docs/img/call_seq.png b/docs/img/call_seq.png new file mode 100644 index 000000000..8cb28e0c3 Binary files /dev/null and b/docs/img/call_seq.png differ diff --git a/docs/img/fig_geometry.pdf b/docs/img/fig_geometry.pdf new file mode 100755 index 000000000..077c92a94 Binary files /dev/null and b/docs/img/fig_geometry.pdf differ diff --git a/docs/img/fig_geometry.png b/docs/img/fig_geometry.png new file mode 100644 index 000000000..8436f3e1e Binary files /dev/null and b/docs/img/fig_geometry.png differ diff --git a/docs/img/fig_vertical.pdf b/docs/img/fig_vertical.pdf new file mode 100755 index 000000000..82a296164 Binary files /dev/null and b/docs/img/fig_vertical.pdf differ diff --git a/docs/img/fig_vertical.png b/docs/img/fig_vertical.png new file mode 100644 index 000000000..49e8422e9 Binary files /dev/null and b/docs/img/fig_vertical.png differ diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 000000000..8dfe72d41 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,93 @@ +.. fesom2 documentation master file, created by + sphinx-quickstart on Sat Sep 28 22:37:42 2019. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +FESOM2 documentation +==================== + +Authors + ------- + + Sergey Danilov, Dmitry Sidorenko, Nikolay Koldunov, Patrick Scholz, Qiang Wang, Thomas Rackow, Helge Goessling and Lorenzo Zampieri + + +.. 
toctree:: + :maxdepth: 2 + :caption: Contents: + + getting_started/getting_started + general_configuration/general_configuration + ocean_configuration/ocean_configuration + forcing_configuration + seaice_configuration + icepack_in_fesom + data_processing/data_processing + geometry + meshes/meshes + main_equations + vertical_discretization + temporal_discretization + spatial_discretization + time_stepping_transport + subcycling_instead_solver + isoneutral_diffusion_triangular_prisms + zreferences + + +Proposed structure: + +:: + + Introduction + Getting started + TL;DR version for supported HPC systems + Detailed steps of compiling and runing the code + Ubuntu based Docker container (to get first impression of the model) + Troubleshooting + Looking at the results + Notebooks that comes with the model + pyfesom2 (short description with link to documentation) + Tutorials + Build model on Ubuntu (Video) + Add new output variable (Video) + First look at model output (Video) + General configuration (namelist.configure) + Time stepping + Restarts + ALE options + Mesh geometry and partitioning + Ocean configuration (namelist.oce) + Ocean dynamics + Ocean tracers + Adding a new tracer + Initial conditions + Sea ice configuration (namelist.ice) + Ice dynamics + Ice thermodynamics + Atmospheric forcing (namelist.forcing) + Output (namelist.io) + Adding new output variable + Meshes + Mesh format + Mesh generation? + Partitioning + MESSY + Hierarchical partitioning + Data pre/post processing + Initial conditions + Convert grid to netCDF that CDO understands + Discretizations and Algorithms + Coupling interfaces + To atmosphere + To ocean biogeochemistry? 
+ Example experiments + FAQ + History + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/isoneutral_diffusion_triangular_prisms.rst b/docs/isoneutral_diffusion_triangular_prisms.rst new file mode 100644 index 000000000..52b2d26ac --- /dev/null +++ b/docs/isoneutral_diffusion_triangular_prisms.rst @@ -0,0 +1,159 @@ +.. _isoneutral_diffusion_triangular_prisms: + +Isoneutral diffusion on triangular prisms +***************************************** + +A rigorous implementation of isoneutral diffusion operator is a difficult task because one has to ensure that it provides decay of discrete variance and that it is exactly zero when applied to the isoneutral density. + +The horizontal and vertical components of fluxes related to (\ref{eq:kiso}) are + +.. math:: + {\bf F}_h(T)=-K_i(\nabla_h T+{\bf s}\partial_z T), + + +.. math:: + F_z(T)=-K_i({\bf s}\nabla_h T+s^2\partial_zT)-K_d\partial_zT. + +The terms including :math:`K_i` are referred to as the isoneutral flux, the +remaining term with :math:`K_d` is the dianeutral flux. +To complete the description, the slope :math:`{\bf s}` has to be expressed in terms of thermal expansion and saline contraction coefficients :math:`\alpha` +and :math:`\beta`, + +.. math:: + {\bf s}=-\frac{-\alpha\nabla_h T+\beta \nabla_h S}{-\alpha\partial_z + T+\beta\partial_z S}. + +(Do not mix this :math:`\alpha` with the other one used in the time stepping.) + +The implementation difficulty stems from the fact that the tracers together with :math:`\alpha` and :math:`\beta` are located at mid-layers, the vertical derivatives are located at the level surfaces, and the horizontal derivatives are at mid-layers, but at cells instead of vertices. The estimate of slope at a single point is impossible without extra interpolation, which will break full consistency. The solution involves 'triads' (see, e.g., :cite:`Griffiesbook` and :cite:`Lemarie2012a` and variational formulation. 
Note, however, that the implicit time stepping of the contribution with :math:`s^2K_i` in the vertical flux, needed for stability reasons :cite:`Lemarie2012a`, will introduce some errors even in this case. + +First, we split each triangular prism of our mesh into subvolumes characterized by unique values of the expansion/contraction coefficients, vertical gradients and horizontal gradients, to form the triads. We obtain 6 subprisms per prism, formed by sections along midplane and by vertical planes passing through centroids and mid-edges. + +Next, the dissipation functional is written. We will use different, but equivalent formulation, which would follow if tracer equations were written in a weak form. Consider the bilinear form + +.. math:: + 6\mathcal{F}(\tilde T,T)=-\sum_{k,c}\sum_{p=1}^{p=6}A_ch_{kc}(\nabla \tilde{T} + {\bf K}\nabla T)_{kcp}. + +Here the first summation is over mesh prisms (cells and layers), and the second one, over the subprisms :math:`p`. The volume of each subprism is 1/6 of the volume of the full prism (hence the factor 6 on the lhs). Clearly, :math:`2\mathcal{F}(T,T)` corresponds to total variance dissipation. If :math:`T` is the isoneutral density and its gradients are expressed in terms of :math:`\alpha` and :math:`\beta` as for the slope above, :math:`\mathcal{F}` vanishes. + +The last step is to compute the contribution to the rhs of scalar equation from the diffusion term + +.. math:: + (R_T)_{kv}=(1/A_{kv})\partial\mathcal{F}/\partial \tilde T_{kv}. + +Since we deal with layer-integrated equations, the division is over the area of scalar cell :math:`v` instead of division by volume. Writing down the expression for :math:`R_T` is a rather tedious task. The result can be reformulated in terms of the discrete divergence of discrete flux. Indeed, :math:`(R_T)_{kv}A_{kv}` is the volume-integrated rhs, i. e., the sum of fluxes through the faces. 
+ +Note that since :math:`\mathcal{F}` is a bilinear form, the definition of the rhs is always globally consistent. Indeed, the total variance +dissipation is :math:`\sum_{k,v}T_{kv}(R_T)_{kv}A_{kv} +=2\sum_{k,v}T_{kv}\partial\mathcal{F}/\partial \tilde T_{kv}=2\mathcal{F}(T,T)`. + +In summary, the variational formulation originally proposed for quadrilaterals can easily be extended to triangular meshes. All symmetry properties will be granted if computations +are local on subprisms. + +Substituting :math:`{\bf K}` in the form :math:`\mathcal{F}` we get + +.. math:: + \mathcal{F} =\sum_{k,c}\sum_p[-K_i\nabla_h \tilde T\cdot\nabla_h T-K_i\nabla_h \tilde + T\cdot{\bf s}\partial_zT-K_i\partial_z\tilde T{\bf s}\cdot \nabla_h T-(K_d +s^2K_i)\partial_z \tilde + T\partial_zT]_{kcp}(A_ch_{kc}/6). + +The first term does not involve the slope and will not be considered. + +Let us start from the third term and compute its contribution to :math:`\partial +\mathcal{F}/\partial \tilde T_{kv}`. The vertical derivative at level :math:`k` (the top surface of layer :math:`k`) is + +.. math:: + (\partial_zT)_{kv} = \frac{T_{(k-1)v}-T_{kv}}{Z_{(k-1)v}-Z_{kv}}, + +and :math:`\nabla_h T` is defined on cell :math:`c` + +.. math:: + (\nabla_h T)_{kc} = \sum_{v(c)}{\bf G}_{cv}T_{kv}, + +Hence it follows for the contribution from layer :math:`k` and element :math:`c` + +.. math:: + \frac{\partial\mathcal{F}}{\partial \tilde T_{kv}} :\quad + \frac{1}{6}A_ch_{kc}\left[\frac{-1}{Z_{k-1}-Z_k}(-K_i{\bf s})^t_{kcv}\cdot(\nabla_h T)_{kc} + + \frac{1}{Z_k-Z_{k+1}} (-K_i{\bf s})^b_{kcv}\cdot(\nabla_h T)_{kc}\right], + +.. math:: + \frac{\partial\mathcal{F}}{\partial \tilde T_{(k-1)v}} :\quad + \frac{1}{6}A_ch_{kc} + \frac{1}{Z_{k-1}-Z_k}(-K_i{\bf s})^t_{kcv}\cdot(\nabla_h T)_{kc}, + + +.. math:: + \frac{\partial\mathcal{F}}{\partial \tilde T_{(k+1)v}} :\quad + \frac{1}{6}A_ch_{kc}\frac{-1}{Z_{k}-Z_{k+1}}(-K_i{\bf s})^b_{kcv}\cdot(\nabla_h T)_{kc}. 
+ + +In the +expressions above, indices :math:`k` and :math:`c` identify the triangular prism, and the index of vertex :math:`v` together with the upper index :math:`t` or :math:`b` identify the subprism (related to :math:`v` and either top or bottom of the full prism). The expression :math:`(K_i{\bf s})^t_{kcv}` means that :math:`K_i` is estimated on level :math:`k` and vertex :math:`v`, and the slope involves the triplet with :math:`\alpha,\beta` at :math:`kv`, the vertical derivatives at :math:`kv` and the horizontal derivatives at :math:`kc`. For :math:`(K_i{\bf s})^b_{kcv}`, the pairs of indices are :math:`(k+1)v,\, kv,\,(k+1)v` and :math:`kc` respectively. + +Now, we combine the contributions from the column associated with cell :math:`c` +that enter the rhs of equation on :math:`T_{kv}` (they come from prisms :math:`(k-1)c`, :math:`kc` and :math:`(k + 1)c`) + +.. math:: + \frac{\partial\mathcal{F}}{\partial\tilde T_{kv}}:\quad \frac{A_c}{6}\left[ + \frac{h_{kc}}{Z_{k-1}-Z_k}(K_i{\bf s}\cdot\nabla_h T)^t_{kcv}+ + \frac{h_{(k-1)c}}{Z_{k-1}-Z_k}(K_i{\bf s}\cdot\nabla_h T)^b_{(k-1)cv}\right. + +.. math:: + \left.-\frac{h_{kc}}{Z_{k}-Z_{k+1}}(K_i{\bf s}\cdot\nabla_h T)^b_{kcv}- + \frac{h_{(k+1)c}}{Z_{k}-Z_{k+1}}(K_i{\bf s}\cdot\nabla_h T)^t_{(k+1)cv}\right]. + +We easily recognize here the fluxes through the upper and lower surfaces of scalar prism :math:`kv` coming from the part shared with prism :math:`kc`. They are thickness-weighted over the cells on both sides. Indeed, :math:`2(Z_{k-1}-Z_k) = h_{kc}+h_{(k-1)c}` for the top surface and similarly for the bottom. + +We continue with the +contribution from :math:`-s^2K_i\partial_z \tilde T\partial_zT`. +The contribution to equation at (:math:`kv`) from prisms :math:`(k-1)c`, :math:`kc` and :math:`(k+1)c` may come from the following terms in :math:`\mathcal{F}` + +.. math:: + \frac{A_c}{6}\left[(-s^2K_i)^t_{kcv}\frac{\tilde T_{(k-1)v}- \tilde + T_{kv}}{Z_{k-1}-Z_k}\frac{T_{(k-1)v}-T_{kv}}{Z_{k-1}-Z_k}h_{kc}+\right. 
+ +.. math:: + (-s^2K_i)^b_{kcv}\frac{\tilde T_{kv}- \tilde + T_{(k+1)v}}{Z_{k}-Z_{k+1}}\frac{T_{kv}-T_{(k+1)v}}{Z_{k}-Z_{k+1}}h_{kc}+ + +.. math:: + (-s^2K_i)^b_{(k-1)cv}\frac{\tilde T_{(k-1)v}-\tilde + T_{kv}}{Z_{k-1}-Z_k}\frac{T_{(k-1)v}-T_{kv}}{Z_{k-1}-Z_k}h_{(k-1)c}+ + +.. math:: + \left.(-s^2K_i)^t_{(k+1)cv}\frac{\tilde T_{kv}- \tilde + T_{(k+1)v}}{Z_{k}-Z_{k+1}}\frac{T_{kv}-T_{(k+1)v}}{Z_{k}-Z_{k+1}}h_{(k+1)c}\right]. + + +Now, performing differentiation with respect to :math:`T_{kv}`, we find + +.. math:: + \frac{\partial\mathcal{F}}{\partial \tilde T_{kv}} = \frac{A_c}{6} \left[ + \left( \frac{h_{kc}}{Z_{k-1}-Z_k} + (s^2K_i)^t_{kcv}+ \frac{h_{(k-1)c}}{Z_{k-1}-Z_k} (s^2K_i + )^b_{(k-1)cv}\right)\frac{ T_{k-1}- T_k}{Z_{k-1}-Z_k}\right. + +.. math:: + +\left.\left(-\frac{h_{kc}}{Z_k-Z_{k+1}} + (s^2K_i)^b_{kcv}-\frac{h_{(k+1)c}}{Z_k-Z_{k+1}}(s^2K_i)^t_{(k+1)cv}\right)\frac{T_k-T_{k+1}}{Z_k-Z_{k+1}}\right]. + +The result is the standard scheme for the vertical diffusion, but the +estimates of :math:`s^2K_i` are thickness-weighted over contributing layers. The fluxes +through the top and bottom surfaces can conveniently be assembled in a cycle over cells and layers. + +We return to the horizontal part in the expression for :math:`\mathcal{F}`. Layer :math:`k` and cell :math:`c` contribute to :math:`\mathcal{F}` as + +.. math:: + \frac{A_c}{6}h_{kc}(\sum_{v(c)}{\bf G}_{cv}\tilde + T_{kv})\cdot\left[\sum_{v(c)}\frac{T_{(k-1)v}- + T_{kv}}{Z_{k-1}-Z_k}(-K_i{\bf s})^t_{kcv}+\right. + +.. math:: + \left.\sum_{v(c)}\frac{T_{kv}- T_{(k+1)v}}{Z_k-Z_{k+1}}(-K_i + {\bf s})^b_{kcv}\right]. + +For the contribution into equation :math:`kv` from :math:`\partial \mathcal{F}/\partial\tilde T_{kv}` it is straightforward to prove that it corresponds to the flux of the quantity in the square brackets through the segments bounding the control volume around :math:`v` inside triangle :math:`c`. 
Indeed, for geometrical reasons :math:`{\bf G}_{cv}` is :math:`{\bf n}_{cv}/h_{cv}` with :math:`{\bf n}_{cv}` the normal to the edge of :math:`c` opposing vertex :math:`v` directed from this vertex (outer for :math:`c`) and :math:`h_{cv}` the height in :math:`c` drawn from :math:`v`. This implies that :math:`A_c{\bf G}_{cv}={\bf n}_{cv}l_{cv}/2`, where :math:`l_{cv}` is the length of the opposing edge. Obviously, for the two segments bounding the control volume :math:`v` inside cell :math:`c` the sum of normal vectors multiplied with the lengths of segments is :math:`{\bf n}_{cv}l_{cv}/2`. Thus, we arrive at flux representation. + +Although computations as written are possible, FESOM at present follows a simplified scheme which deals with the slope vector averaged over the prism (instead of considering 6 different slope vectors). The motivation for this step is purely numerical -- it is more computationally efficient and more stable. The associated dianeutral mixing is the subject of study. The implementation of full scheme is delayed. diff --git a/docs/main_equations.rst b/docs/main_equations.rst new file mode 100644 index 000000000..876f32338 --- /dev/null +++ b/docs/main_equations.rst @@ -0,0 +1,57 @@ +.. _main_equations: + +Main equations +************** + +.. _sec_cequations: + +Main equations +============== + +The code solves the standard set of equations derived under the standard set of approximations (Boussinesq, hydrostatic, and traditional approximations). +These equations include the momentum equation for horizontal velocity + +.. math:: + \partial_t\mathbf{u}+f\mathbf{e}_z\times\mathbf{u}+(\mathbf{u}\cdot\nabla_h+w\partial_z)\mathbf{u}+\nabla_h p/\rho_0=D_h\mathbf{u}+\partial_z\nu_v\partial_z\mathbf{u}, + :label: eq_cmom + +the hydrostatic equation + +.. math:: + \partial_zp=-g\rho, + :label: eq_chydrost + +the Boussinesq form of the continuity equation + +.. 
math:: + \partial_zw=-\nabla\cdot\mathbf{u}, + :label: eq_ccont + +and the equations for potential temperature (FESOM is still using potential temperature, which will be replaced by conservative temperature in the near future) and salinity + +.. math:: + \partial_t T+\nabla\cdot (\mathbf{u}T)+\partial_z(wT)=\nabla\cdot\mathbf{K}\nabla T, + :label: eq_cT + + +.. math:: + \partial_t S+\nabla\cdot (\mathbf{u}S)+\partial_z(wS)=\nabla\cdot\mathbf{K}\nabla S. + :label: eq_cS + +In these equations :math:`\mathbf{u}=(u,v)` is the horizontal velocity, :math:`f` the Coriolis parameter, :math:`\mathbf{e}_z` the unit vertical vector, :math:`\rho_0` the reference density, :math:`p` the pressure, :math:`D_h` the horizontal viscosity operators to be specified further, :math:`\nu_v` the vertical viscosity coefficient, :math:`g` the gravitational acceleration, :math:`T, S`, the potential temperature and salinity and :math:`\mathbf{K}` the diffusivity tensor directing mixing in deep ocean to be isoneutral. The operator :math:`\nabla_h` is two-dimensional, :math:`\nabla_h=(\partial_x,\partial_y)`, and :math:`\nabla=(\nabla_h, \partial_z)`. The equations above have to be complemented by the equation of state connecting density with the temperature, salinity and pressure. In the Boussinesq approximation, the pressure featuring in the equation of state is the fluid depth up to a factor :math:`g\rho_0`, so we formally write + +.. math:: + \rho=\rho(T,S,z). + +They also need appropriate initial and boundary conditions. The walls and bottom of the ocean basin are traditionally considered as isolated and impermeable, implying no flux boundary conditions. Flux conditions are imposed on the surface. Bottom acts as a momentum sink through the drag force applied there. + +Ocean free surface denoted :math:`\eta` varies in space and time. An equation governing it is obtained by integrating :eq:`eq_ccont` vertically from the bottom at :math:`z=-H(x,y)` to the top at :math:`z=\eta(x,y,t)` + +.. 
math:: + \partial_t\eta+\nabla_h\cdot\int^{\eta}_{-H}\mathbf{u}dz=-W, + :label: eq_ceta + +where :math:`W` is water flux leaving the ocean through the surface (specified as a part of boundary conditions). We stress that the last equation is not an independent one, but the consequence of the equations written before. + +Transition from continuous to discrete equation includes the steps of spatial and temporal discretization. The spatial discretization is very different for the vertical and horizontal direction and is treated separately. We begin with vertical discretization, followed by temporal discretization and then by the horizontal discretization. + diff --git a/docs/meshes/meshes.rst b/docs/meshes/meshes.rst new file mode 100644 index 000000000..81f1a6192 --- /dev/null +++ b/docs/meshes/meshes.rst @@ -0,0 +1,121 @@ +.. _chap_meshes: + +Meshes +****** + +Mesh files +========== + +FESOM2 as well as FESOM1.4 is formulated on general triangular meshes. There are three mesh files that are read: ``nod2d.out, elem2d.out`` and ``aux3d.out``. Those three files are enough to define a mesh. The files are written as simple ASCII files storing the information on the coordinates of mesh vertices, on how vertices are combined in triangles and on bottom depth at vertices. +The format of the files is as follows: + +nod2d.out +^^^^^^^^^ + ++----------+-----------+-----------+------------+ +|:math:`V` | | | | ++----------+-----------+-----------+------------+ +|:math:`v` |:math:`x_1`|:math:`y_1`|:math:`i_1` | ++----------+-----------+-----------+------------+ +|... |... |... | ... | ++----------+-----------+-----------+------------+ +|:math:`V` |:math:`x_V`|:math:`y_V`|:math:`i_V` | ++----------+-----------+-----------+------------+ + +Here :math:`V` is the number of surface vertices, :math:`x_v` and :math:`y_v` are the longitude and latitude (in angular measure) of vertex :math:`v`, and :math:`i_v` the index of vertex :math:`v`. 
This field is obsolete and is kept for mesh compatibility with FESOM1.4. + +elem2d.out +^^^^^^^^^^ + ++--------------+--------------+--------------+ +|:math:`C` | | | ++--------------+--------------+--------------+ +|:math:`v_{11}`|:math:`v_{12}`|:math:`v_{13}`| ++--------------+--------------+--------------+ +|... |... |... | ++--------------+--------------+--------------+ +|:math:`v_{C1}`|:math:`v_{C2}`|:math:`v_{C3}`| ++--------------+--------------+--------------+ + +Here :math:`C` is the number of triangles, :math:`v_{c1}`, :math:`v_{c2}`, :math:`v_{c3}` are the vertices of triangle (cell) :math:`c`. The indexes and coordinates of vertices are provided in the ``nod2d`` file (:math:`v`). + +aux3d.out (to be renamed to depth.out) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + ++--------------+ +|:math:`L` | ++--------------+ +|:math:`h_{1}` | ++--------------+ +|... | ++--------------+ +|:math:`h_{l}` | ++--------------+ +|... | ++--------------+ +|:math:`h_{L}` | ++--------------+ +|:math:`H_{1}` | ++--------------+ +|... | ++--------------+ +|:math:`H_{v}` | ++--------------+ +|... | ++--------------+ +|:math:`H_{V}` | ++--------------+ + +Here :math:`L` is the number of model levels (e.g. standard is 48), :math:`h_{l}` is the depth of each model level and :math:`H_{v}` is the depth at each vertex defined in ``nod2d.out`` file. + +Those three files are read during the mesh partitioning and several additional files are generated by the partitioner in the folder where the mesh files are located. + +Mesh arrays +=========== + +The ``nod2d.out``, ``elem2d.out`` and ``aux3d.out`` files are read in variables and arrays with names inherited from FESOM1.4 where vertices were dubbed nodes and cells elements. Full files are only read on mesh partitioning step. Local copies are saved for each PE and read in production runs. 
+ +Name correspondence +^^^^^^^^^^^^^^^^^^^ + +- :math:`V\,\to {\tt nod2D}` + +- :math:`C\,\to {\tt elem2D}` + +- The array ``coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D)`` stores vertex coordinates in radian measure. ``1:myDim_nod2D`` are the vertices that belong to my PE (``myPE``), and ``myDim_nod2D+1:myDim_nod2D+eDim_nod2D`` are the halo vertices. The halo vertices share a common triangle with vertices that belong to ``myPE``, yet do not belong to the ``myPE`` themselves. + +- Each column of array ``elem2D_nodes(1:3,1:myDim_elem2D+eDim_elem2D+eXDim_elem2D)`` stores vertex indices of particular triangle. ``1:myDim_elem2D`` are the triangles that belong to ``myPE``, which are those that contain at least one vertex that belongs to ``myPE``. Thus, triangles with vertices that belong to several PE are counted as my triangle by each PE. The rest combines two types of halo: First triangles sharing an edge with my triangles are added, and then triangles that share vertex with my triangles, but are absent in the smaller halo. + +- Local numbering is used on each PE. Additional arrays are available to do local to global transform if needed. + + +Auxiliary mesh arrays +===================== + +Edge arrays: +^^^^^^^^^^^^ + +- :math:`E\,\to {\tt edge2D}` +- ``edge(1:2, myDim_edge2D+eDim_edge2D)`` is the array storing vertices of an edge (its columns are the code analog of the set :math:`V(e)`) +- ``edge_tri(1:2, myDim_edge2D+eDim_edge2D)`` is the array storing triangles on the left and right of the edge (its columns correspond to :math:`C(e)`). +- ``elem2D_edges(1:3, 1:myDim_elem2D)`` stores edges of triangles. Its columns correspond to :math:`E(c)` +- ``edge_dxdy(2,myDim_edge2D+eDim_edge2D)`` stores :math:`\mathbf{l}_e` in radian measure. +- ``edge_cross_dxdy(4,myDim_edge2D+eDim_edge2D)`` stores two cross-edge :math:`{\bf d}_{ec}` vectors in physical measure (meters). 
+ + +Neighborhood arrays: +^^^^^^^^^^^^^^^^^^^^ + +- Two arrays ``nod_in_elem2D_num, nod_in_elem2D`` store the number of cell neighbors and their indices for vertex :math:`v` (correspond to :math:`C(v)`) +- Two arrays ``nod_neighbr_num``, ``nod_neighbr`` store the number of vertex neighbors and their indices for vertex :math:`v` (correspond to :math:`V(v)`) + +Areas and derivatives: + +- ``elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D)`` stores areas of elements computed using local flat metrics on elements. +- ``areas(1:K,1:myDim_nod2D+eDim_nod2D)`` are the horizontal areas of scalar control volumes. They are computed combining contributions from triangles. + + +Modularity +========== + +All arrays related to mesh are packed in a structure of derived type ``mesh``, which is passed as an argument to routines using mesh arrays (nearly all routines). In order to keep the array names in subroutines compatible with the old FESOM names (the names listed above), there are pointer associations between the fields of the structure and the names used above. Coding can rely on old names. The same logic is pursued with all other derived data types. This also reflects history of code development. The code was initially using global arrays, which prevented modularity. diff --git a/docs/mybib_fesom2.bib b/docs/mybib_fesom2.bib new file mode 100755 index 000000000..f7b30ff04 --- /dev/null +++ b/docs/mybib_fesom2.bib @@ -0,0 +1,733 @@ +@Book{Conkright2002, + Title = {{World Ocean Database 2001. Vol.1: Introduction. NOAA Atlas NESDID 42}}, + Author = {M.E. Conkright and J. I. Antonov and O.K. Baranove and T. P. Boyer and H.E. Garcia and R. Gelfeld and D. Johnson and R. A. Locarnini and P.P. Murphy and T. D. O'Brien and I. Smolyar and C. Stephens}, + Publisher = {U.S.Gov. 
Printing office}, + Year = {2002}, + Address = {Washington, D.C.}, + Editor = {Sidney Levitus}, + Pages = {159} +} + +@Article{Zampieri2020, + Title = {{Impact of sea-ice model complexity on the performance of an unstructured-mesh sea-ice/ocean model under different atmospheric forcings}}, + Author = {Zampieri, L. and Kauker, F. and Fröhle, J. and Sumata, H. and Hunke, E.~C. and Goessling, H.}, + Journal = {Earth and Space Science Open Archive ESSOAr}, + Year = {2020}, + Pages = {1--52}, + Doi = {10.1002/essoar.10505308.1} +} + +@article{Hunke2020, + Author = {Hunke, E. C. and Allard, R. and Bailey, D. A. and Blain, P. and Craig, A. and Dupont, F. and DuVivier, A. and Grumbine, R. and Hebert, D. and Holland, M. and Jeffery, N. and Lemieux, J. and Osinski, R. and Rasmussen, T. and Ribergaard, M. and Roach, L. and Roberts, A. and Turner, M. and Winton, M.}, + Title = {{CICE-Consortium/Icepack: Icepack 1.2.1 (Version 1.2.1)}}, + Journal = {Zenodo}, + Doi = {doi.org/10.5281/zenodo.3712299}, + Url = {https://zenodo.org/record/3712299#.Xvn3DPJS9TZ}, + Year = {2020} +} + +@Article{CORE2_Danabasoglu2014, + Title = {{North Atlantic simulations in Coordinated Ocean-ice Reference Experiments phase \{II\} (CORE-II). Part I: Mean states}}, + Author = {Gokhan Danabasoglu and Steve G. Yeager and David Bailey and Erik Behrens and Mats Bentsen and Daohua Bi and Arne Biastoch and Claus B\"oning and Alexandra Bozec and Vittorio M. Canuto and Christophe Cassou and Eric Chassignet and Andrew C. Coward and Sergey Danilov and Nikolay Diansky and Helge Drange and Riccardo Farneti and Elodie Fernandez and Pier Giuseppe Fogli and Gael Forget and Yosuke Fujii and Stephen M. Griffies and Anatoly Gusev and Patrick Heimbach and Armando Howard and Thomas Jung and Maxwell Kelley and William G. Large and Anthony Leboissetier and Jianhua Lu and Gurvan Madec and Simon J. Marsland and Simona Masina and Antonio Navarra and A. J. G. Nurser and Anna Pirani and D. Salas y Mélia and Bonita L. 
Samuels and Markus Scheinert and Dmitry Sidorenko and Anne-Marie Treguier and Hiroyuki Tsujino and Petteri Uotila and Sophie Valcke and Aurore Voldoire and Qiang Wang}, + Journal = {Ocean~Modell.}, + Year = {2014}, + Pages = {76 - 107}, + Volume = {73}, + Doi = {http://dx.doi.org/10.1016/j.ocemod.2013.10.005}, + Url = {http://www.sciencedirect.com/science/article/pii/S1463500313001868} +} + +@Article{Delworth2012, + Title = {{Simulated Climate and Climate Change in the GFDL CM2.5 High-Resolution Coupled Climate Model}}, + Author = {Thomas L. Delworth and Coauthors}, + Journal = {J.~Climate}, + Year = {2012}, + Pages = {2755--2781}, + Volume = {25}, + Doi = {10.1175/JCLI-D-11-00316.1} +} + +@Article{Ferrari2010, + Title = {{A boundary-value problem for the parameterized mesoscale eddy transport}}, + Author = {Ferrari, R.and Griffies, S. and Nurser, G. and Vallis, G. K.}, + Journal = {Ocean~Modell.}, + Year = {2010}, + Pages = {143–156}, + Volume = {32}, + Doi = {10.1016/j.ocemod.2010.01.004}, +} + +@Article{Ferreira2006, + Title = {{Formulation and implementation of a “residual-mean” ocean circulation model}}, + Author = {Ferreira, D. 
and Marshall, J.}, + Journal = {Ocean~Modell.}, + Year = {2006}, + Number = {1}, + Pages = {86 - 107}, + Volume = {13}, + Doi = {http://dx.doi.org/10.1016/j.ocemod.2005.12.001}, +} + +@PhdThesis{Fuchs2013, + Title = {{Effiziente parallele Verfahren zur L{\"o}sung verteilter, d{\"u}nnbesetzer Gleichungssysteme eines nichthydrostatischen Tsunamimodells}}, + Author = {Annika Fuchs}, + School = {University of Bremen}, + Year = {2013}, + Month = {November}, + Publisher = {The State and University Library Bremen (SuUB)}, + Url = {http://elib.suub.uni-bremen.de/edocs/00103439-1.pdf} +} + +@Article{CORE2_Griffies2014, + Title = {{An assessment of global and regional sea level for years 1993-2007 in a suite of interannual core-II simulations}}, + Author = {Griffies, {Stephen M.} and Jianjun Yin and Durack, {Paul J.} and Paul Goddard and Bates, {Susan C.} and Erik Behrens and Mats Bentsen and Daohua Bi and Arne Biastoch and B\"oning, {Claus W.} and Alexandra Bozec and Eric Chassignet and Gokhan Danabasoglu and Sergey Danilov and Domingues, {Catia M.} and Helge Drange and Riccardo Farneti and Elodie Fernandez and Greatbatch, {Richard J.} and Holland, {David M.} and Mehmet Ilicak and Large, {William G.} and Katja Lorbacher and Jianhua Lu and Marsland, {Simon J.} and Akhilesh Mishra and {Nurser}, {A. J. G.} and {Salas y Mélia}, David and Palter, {Jaime B.} and Samuels, {Bonita L.} and Jens Schr\"oter and Schwarzkopf, {Franziska U.} and Dmitry Sidorenko and Treguier, {Anne Marie} and Tseng, {Yu heng} and Hiroyuki Tsujino and Petteri Uotila and Sophie Valcke and Aurore Voldoire and Qiang Wang and Michael Winton and Xuebin Zhang}, + Journal = {Ocean~Modell.}, + Year = {2014}, + Pages = {35--89}, + Volume = {78}, + Doi = {10.1016/j.ocemod.2014.03.004}, +} + +@Article{Griffies1998, + Title = {{The Gent–McWilliams skew flux}}, + Author = {Stephen M. 
Griffies}, + Journal = {J.~Phys.~Oceanogr.}, + Year = {1998}, + Pages = {831--841} +} + +@Article{Hallberg2013, + Title = {{Using a resolution function to regulate parameterizations of oceanic mesoscale eddy effects}}, + Author = {{Hallberg}, R.}, + Journal = {Ocean~Modell.}, + Year = {2013}, + Pages = {92-103}, + Volume = {72}, + Doi = {10.1016/j.ocemod.2013.08.007} +} + +@Article{KPP, + Title = {{Oceanic vertical mixing: A review and a model with a nonlocal boundary layer parameterization}}, + Author = {Large, W. G. and McWilliams, J. C. and Doney, S. C.}, + Journal = {Rev.~Geophys.}, + Year = {1994}, + Pages = {363--403}, + Volume = {32}, + Doi = {10.1029/94rg01872}, +} + +@Article{LargeYeager2009, + Title = {{The global climatology of an interannually varying air--sea flux data set}}, + Author = {Large, W. G. and Yeager, S. G.}, + Journal = {Clim.~Dyn.}, + Year = {2009}, + Pages = {341--364}, + Volume = {33}, + Doi = {10.1007/s00382-008-0441-3}, + Url = {http://dx.doi.org/10.1007/s00382-008-0441-3} +} + +@Article{pARMS2003, + Title = {{pARMS: a parallel version of the algebraic recursive multilevel solver.}}, + Author = {Li, Zhongze and Saad, Yousef and Sosonkina, Masha}, + Journal = {Numerical Lin. Alg. with Applic.}, + Year = {2003}, + Number = {5-6}, + Pages = {485-509}, + Volume = {10}, + Doi = {http://dx.doi.org/10.1002/nla.325} +} + +@Article{Lumpkin2007, + Title = {{Global Ocean Meridional Overturning}}, + Author = { Rick Lumpkin and Kevin Speer }, + Journal = {J.~Phys.~Oceanogr.}, + Year = {2007}, + Number = {10}, + Pages = {2550-2562}, + Volume = {37}, + Doi = {10.1175/JPO3130.1} +} + +@Article{Marzocchi2015, + Title = {{The North Atlantic subpolar circulation in an eddy-resolving global ocean model}}, + Author = {Alice Marzocchi and Jo\"el J.-M. Hirschi and N. Penny Holliday and Stuart A. Cunningham and Adam T. Blaker and Andrew C. 
Coward}, + Journal = {J.~Mar.~Sys.}, + Year = {2015}, + Pages = {126 - 143}, + Volume = {142}, + Doi = {http://dx.doi.org/10.1016/j.jmarsys.2014.10.007}, + ISSN = {0924-7963} +} + +@Article{Maximenko2005, + Title = {{Observational evidence of alternating zonal jets in the world ocean}}, + Author = {Maximenko, Nikolai A. and Bang, Bohyun and Sasaki, Hideharu}, + Journal = {Geophys.~Res.~Lett.}, + Year = {2005}, + Number = {12}, + Volume = {32}, + Doi = {10.1029/2005GL022728}, + ISSN = {1944-8007} +} + +@Article{PP, + Title = {{Parameterization of vertical mixing in numerical models of tropical oceans}}, + Author = {Pacanowski, R.C. and S.G.H. Philander}, + Journal = {J.~Phys.~Ocean.}, + Year = {1981}, + Pages = {1443-1451}, + Volume = {11} +} + +@Article{Sidorenko2009, + Title = {{On computing transports in finite-element models}}, + Author = {D. Sidorenko and S. Danilov and Q. Wang and A. Huerta-Casas and J. Schr\"oter}, + Journal = {Ocean~Modell.}, + Year = {2009}, + Number = {1–3}, + Pages = {60 - 65}, + Volume = {28}, + Doi = {http://dx.doi.org/10.1016/j.ocemod.2008.09.001}, +} + +@Article{PHC, + Title = {{PHC: a global ocean hydrography with a high-quality Arctic Ocean}}, + Author = {Steele, M. and Morley, R. and Ermold, W.}, + Journal = {J.~Climate}, + Year = {2001}, + Pages = {2079--2087}, + Volume = {14} +} + +@Article{Wang2014, + Title = {{The Finite Element Sea Ice-Ocean Model (FESOM) v.1.4: formulation of an ocean general circulation model}}, + Author = {Wang, Q. and Danilov, S. and Sidorenko, D. and Timmermann, R. and Wekerle, C. and Wang, X. and Jung, T. 
and Schr\"oter, J.}, + Journal = {Geosci.~Model~Dev.}, + Year = {2014}, + Pages = {663--693}, + Volume = {7}, + Doi = {10.5194/gmd-7-663-2014}, + Url = {http://www.geosci-model-dev.net/7/663/2014/} +} + + +@book{Blazek2001, + Author = {Blazek, J.}, + Year = {2001}, + Title = {Computational fluid dynamics: Principles and applications}, + Publisher = {Elsevier}, + Address = {Amsterdam, London, New York, Paris, Shannon, Tokyo}} + } + +@article{Campin2004, + Author = {Campin, J.-M. and Adcroft, A. and Hill, C. and Marshall, J.}, + Year = {2004}, + Title = {Conservation of properties in a free-surface model}, + Journal = {Ocean Modell.}, + Volume = {6}, + Pages= {221--244} + } + +@article{Colella1984, + Author = {Colella, P. and Woodward, P. R.}, + Year = {1984}, + Title = {The piecewise parabolic method ({PPM}) for gas-dynamical simulations}, + Journal ={ J. Comput. Phys.}, + Volume = {54}, + Pages = {174--201} + } + +@article{Danilov2012, + Author = { Danilov, S.}, + Year = {2012}, + Title = {Two finite-volume unstructured mesh models for large-scale ocean modeling}, + Journal = {Ocean Modell.}, + Volume = {47}, + Pages = {14--25}} + +@article{Danilov2013, + Author = {Danilov, S.}, + Year = {2013}, + Title = {Ocean modeling on unstructured meshes}, + Journal = {Ocean Modell.}, + Volume = {69}, + Pages = {195--210}} + +@article{DanilovAndrosov2015, + Author = {Danilov, S. and Androsov, A.}, + Year = {2015}, + Title = {Cell-vertex discretization of shallow water equations on mixed unstructured meshes}, + Journal = {Ocean Dyn.}, + Volume = {65}, + Pages = {33--47}, + Doi = {10.1007/s10236-014-0790-x}} + +@article{Danilov2015, + Author = {Danilov, S. and Wang, Q. and Timmermann, R. and Iakovlev, N. and Sidorenko, D. and Kimmritz, M. and Jung, T. and Schr\"oter, J.}, + Year = {2015}, + Title ={{Finite-Element Sea Ice Model (FESIM), version 2}}, + Journal = {Geosci. Model Dev.}, + Volume = {8}, + Pages = {1747--1761}} + +@article{DanilovWang2015, + Author = {Danilov, S. 
and Wang, Q.}, + Year = {2015}, + Title = {Resolving eddies by local mesh refinement}, + Journal = {Ocean Modell.}, + Volume = {93}, + Pages ={75--83}} + +@book{DoneaHuerta2003, + Author = {Donea, J. and Huerta, A.}, + Title = {Finite element methods for flow problems}, + Publisher = {Wiley}, + Address = {Chichester}, + Year = {2003}} + +@article{Dukhovskoy2009, + Author = {Dukhovskoy, D. S. and Morey, S. L. and Martin, P. J. and O'Brien, J. J. and Cooper, C.}, + Year = {2009}, + Title ={Application of a vanishing, quasi-sigma, vertical coordinate for simulation of high-speed, deep currents over the {Sigsbee Escarpment in the Gulf of Mexico}}, + Journal = {Ocean Modell.}, + Volume = {28}, + Pages = {250--265}} + +@article{Hofmeister2010, + Author = {Hofmeister, R. and Burchard, H. and Beckers, J. M.}, + Year = {2010}, + Title = {Non-uniform adaptive vertical grids for 3D numerical ocean models}, + Journal = {Ocean Modell.}, + Volume = {33}, + Pages = {70--86}} + + +@book{Griffiesbook, + Author = {Griffies, S. M.}, + Year = {2004}, + Title = {Fundamentals of Ocean Climate Models}, + Publisher = {Princeton University Press}} + +@article{Hellmer2012, + Author = {Hellmer, H. H. and Kauker, F. and Timmermann, R. and Determann, J. and Rae, J.}, + Year = {2012}, + Title = {Twenty-first-century warming of a large {A}ntarctic ice-shelf cavity by a redirected coastal current}, + Journal = {Nature}, + Volume = {485}, + Pages = {225--228}} + +@article{Nakayama2014, + Author = {Nakayama, Y. and Timmermann, R. and Schr\"oder, M. and Hellmer, H. H.}, + Year = {2014}, + Title = {On the difficulty of modeling {Circumpolar Deep Water intrusions onto the Amundsen Sea continental shelf}}, + Journal = {Ocean Modell.}, + Volume = {84}, + Pages = {26--34}} + +@article{Ringler2013, + Author = {Ringler, T. and Petersen, M. and Higdon, R. and Jacobsen, D. and Maltrud, M. 
and Jones, P.W.}, + Year = {2013}, + Title = {A multi-resolution approach to global ocean modelling}, + Journal = {Ocean Modell.}, + Volume = {69}, + Pages = {211--232}} + +@article{Petersen2015, + Author = {Petersen, M. R. and Jacobsen, D. W. and Ringler, T. D. and Hecht, M. W. and Maltrud, M. E.}, + Title = {Evaluation of the arbitrary {Lagrangian Eulerian vertical coordinate method in the MPAS-ocean model}}, + Journal = {Ocean Modell.}, + Volume = {86}, + Pages = {93--113}, + Year = {2015}} + +@article{Ringler2002, + Author = {Ringler, T. D. and Randall, D. A.}, + Year = {2002}, + Title = {The {ZM} grid: an alternative to the {Z} grid}, + Journal = {Mon. Wea. Rev.}, + Volume = {130}, + pages = {1411--1422}} + +@article{Leclair2011, + Author = {Leclair, M. and Madec, G.}, + Title = {$\tilde z$-coordinate, an arbitrary {Lagrangian-Eulerian} coordinate separating high and low frequency motions}, + Journal = {Ocean Modell.}, + Volume = {37}, + Pages = {139--152}, + Year = {2011}} + +@article{Lemarie2012a, + Author = {Lemari\'e, F. and Debreu, L. and Shchepetkin, A. F. and McWilliams, J. C.}, + Year ={2012}, + Title = {On the stability and accuracy of the harmonic and biharmonic isoneutral mixing operators in ocean models}, + Journal = {Ocean Modell.}, + Volume ={52-53}, + Pages = {9--35}} + +@article{Lemarie2012b, + Author = {Lemari\'e, F. and Kurian, J. and Shchepetkin, A. F. and Molemaker, M. J. and Colas, F. and McWilliams, J. C.}, + Year = {2012}, + Title = {Are there inescapable issues prohibiting the use of terrain-following coordinates in climate models?}, + Journal = {Ocean Modell.}, + Volume = {42}, + Pages = {57--79}} + +@article{Lemarie2015, + Author = {Lemari\'e, F. and Debreu, L. and Madec, G. and Demange, J. and Molines, J. M. 
and Honnorat M.}, + Year = {2015}, + Title = {Stability constraints for oceanic numerical models: implications for the formulation of time and space discretizations}, + Journal = {Ocean Modell.}, + Volume = {92}, + Pages = {124--148}} + +@article{Sein2016, + Author = {Sein, D. V. and Danilov, S. and Biastoch, A. and +Durgadoo, J. V. and Sidorenko, D. and Harig, S. and Wang, Q.}, + Year = {2016}, + Title = {Designing variable ocean model resolution based on the observed ocean variability}, + Journal = {J. Adv. Model. Earth Syst.}, + Volume = {08}, + Doi = {10.1002/ 2016MS000650}} + +@article{Shchepetkin2015, + Author = {Shchepetkin, A. F.}, + Year = {2015}, + Title = {An adaptive, {Courant-number-dependent} implicit scheme for vertical advection in oceanic modeling}, + Journal ={Ocean Modell.}, + Volume = {91}, + Pages = {38--69}} + +@article{Timmermann2012, + Author = {Timmermann, R. and Wang, Q. and Hellmer, H. H.}, + Year = {2012}, + Title = {Ice-shelf basal melting in a global finite-element sea–ice ice–shelf ocean model}, + Journal = {Ann. Glaciol.}, + Volume = {53}, + Pages = {303--314}} + + +@article{Wekerle2013, + Author = {Wekerle, C. and Wang, Q. and Danilov, S. and Jung, T. and Schr\"oter, J.}, + Year = {2013}, + Title = {{The Canadian Arctic Archipelago throughflow in a multiresolution global model: Model assessment and the driving mechanism of inter-annual variability}}, + Journal = {J. Geophys. Res.}, + Volume = {118}, + Pages = {1--17}, + Doi = {10.1002/jgrc.20330}} + + +@article{Wang2008, + Author = {Wang, Q. and Danilov, S. and Schr\"oter, J.}, + Title = {{Finite Element Ocean circulation Model based on triangular prismatic elements, with application in studying the effect of vertical discretization}}, + Journal = {J. Geophys. Res.}, + Volume = {113}, + Pages = {C05015}, + Doi = {10.1029/2007JC004482}, + Year = {2008}} + + +@article{FVCOM, + Author = {Chen, C. and Liu, H. and Beardsley, R. 
C.}, + Title = {An unstructured, finite volume, three-dimensional, primitive equation ocean model: application to coastal ocean and estuaries}, + Journal = {J. Atmos. Ocean. Tech.}, + Volume ={20}, + Pages = {159--186}, + Year = {2003}} + +@article {notz2013, +author = {Notz, Dirk and Haumann, F. Alexander and Haak, Helmuth and Jungclaus, Johann H. and Marotzke, Jochem}, +title = {{Arctic sea-ice evolution as modeled by Max Planck Institute for Meteorology's Earth system model}}, +journal = {J. Adv. Model. Earth Syst.}, +volume = {5}, +number = {2}, +issn = {1942-2466}, +doi = {10.1002/jame.20016}, +pages = {173--194}, +year = {2013}, +} + +@article {schweiger2011, +author = {Schweiger, Axel and Lindsay, Ron and Zhang, Jinlun and Steele, Mike and Stern, Harry and Kwok, Ron}, +title = {{Uncertainty in modeled Arctic sea ice volume}}, +journal = {J.~Geophys.~Res.: Oceans}, +volume = {116}, +number = {C8}, +issn = {2156-2202}, +doi = {10.1029/2011JC007084}, +year = {2011} +} + +@article {fetterer2009, +author = {Fetterer, F. and Knowles K. and Meier, W. and Savoie, M.}, +title = {Sea Ice Index, Digital media}, +journal = {Boulder, Colorado USA: National Snow and Ice Data Center}, +year = {2002, updated 2009}, +doi = {10.7265/N5QJ7F7W} +} + +@article{Wang2016_core1, +title = "An assessment of the Arctic Ocean in a suite of interannual CORE-II simulations. Part I: Sea ice and solid freshwater ", +journal = "Ocean Modelling ", +volume = "99", +number = "", +pages = "110 - 132", +year = "2016", +note = "", +issn = "1463-5003", +doi = "http://dx.doi.org/10.1016/j.ocemod.2015.12.008", +url = "http://www.sciencedirect.com/science/article/pii/S1463500315002449", +author = "Qiang Wang and Mehmet Ilicak and Rüdiger Gerdes and Helge Drange and Yevgeny Aksenov and David A. Bailey and Mats Bentsen and Arne Biastoch and Alexandra Bozec and Claus Böning and Christophe Cassou and Eric Chassignet and Andrew C. 
Coward and Beth Curry and Gokhan Danabasoglu and Sergey Danilov and Elodie Fernandez and Pier Giuseppe Fogli and Yosuke Fujii and Stephen M. Griffies and Doroteaciro Iovino and Alexandra Jahn and Thomas Jung and William G. Large and Craig Lee and Camille Lique and Jianhua Lu and Simona Masina and A.J. George Nurser and Benjamin Rabe and Christina Roth and David Salas y Mélia and Bonita L. Samuels and Paul Spence and Hiroyuki Tsujino and Sophie Valcke and Aurore Voldoire and Xuezhu Wang and Steve G. Yeager", +abstract = "Abstract The Arctic Ocean simulated in fourteen global ocean-sea ice models in the framework of the Coordinated Ocean-ice Reference Experiments, phase \{II\} (CORE II) is analyzed. The focus is on the Arctic sea ice extent, the solid freshwater (FW) sources and solid freshwater content (FWC). Available observations are used for model evaluation. The variability of sea ice extent and solid \{FW\} budget is more consistently reproduced than their mean state in the models. The descending trend of September sea ice extent is well simulated in terms of the model ensemble mean. Models overestimating sea ice thickness tend to underestimate the descending trend of September sea ice extent. The models underestimate the observed sea ice thinning trend by a factor of two. When averaged on decadal time scales, the variation of Arctic solid \{FWC\} is contributed by those of both sea ice production and sea ice transport, which are out of phase in time. The solid \{FWC\} decreased in the recent decades, caused mainly by the reduction in sea ice thickness. The models did not simulate the acceleration of sea ice thickness decline, leading to an underestimation of solid \{FWC\} trend after 2000. The common model behavior, including the tendency to underestimate the trend of sea ice thickness and March sea ice extent, remains to be improved. " +} + +@article{Haid2015, +Author= {Haid, V. and Timmermann, R. and Ebner, L. 
and Heinemann, G.},
+Year = {2015},
+title = {{Atmospheric forcing of coastal polynyas in the south-western Weddell Sea}},
+Journal = {Antarctic Science},
+Volume = {27},
+Pages = {388--402}
+}
+
+@article{Haid2013,
+Author= {Haid, V. and Timmermann, R.},
+Year = {2013},
+Title ={Simulated heat flux and sea ice production at coastal polynyas in the southwestern {Weddell Sea}},
+Journal = {J. Geophys. Res.},
+Volume= {118},
+Pages ={2640--2652},
+doi = {10.1002/jgrc.20133}}
+
+@article{Wekerle2016,
+Author = {Wekerle, C. and Wang, Q. and Danilov, S. and Jung, T.},
+Title = {{Pathways of Atlantic Water in the Nordic Seas: locally eddy-permitting ocean simulation in a global setup}},
+Journal = {J. Geophys. Res.},
+Year = {2016},
+Pages={submitted}}
+
+
+@article{Adcroft_Hallberg_2006,
+Author = {Alistair Adcroft and Robert Hallberg},
+Title = {On methods for solving the oceanic equations of motion
+ in generalized vertical coordinates},
+Journal = {Ocean Modell.},
+Volume = {11},
+Year = {2006},
+Pages = {224--233}}
+
+@article{Burchard_Beckers_2004,
+ Author = {Burchard, H. and Beckers, J. M.},
+ Year = {2004},
+ Title = {Non-uniform adaptive vertical grids in
+ one-dimensional numerical ocean models},
+ Journal = {Ocean Modell.},
+ Volume = {6},
+ Pages = {51--81}}
+
+@article{SkamarockMenchaca2010,
+ Author = {Skamarock, W. C. and Menchaca, M.},
+ Year = {2010},
+ Title = {Conservative transport schemes for spherical
+geodesic grids: high-order reconstructions for forward-in-time
+schemes},
+ Journal = {Mon. Wea. Rev.},
+ Volume = {138},
+ Pages = {4497--4508}}
+
+@article{GentMcWilliams1990,
+Author = {Gent, P.R. and McWilliams, J.C.},
+Year = {1990},
+Title= {Isopycnal mixing in ocean circulation models},
+Journal = {J. Phys. Oceanogr.},
+Volume = {20},
+Pages = {150--155}}
+
+@article{Gent1995,
+Author = {Gent, P.R. and Willebrand, J. and McDougall, T.J. 
and McWilliams, J.C.}, +Year = {1995}, +Title = {Parameterizing eddy-induced tracer transports in ocean +circulation models}, +Journal = {J. Phys. Oceanogr.}, +Volume = {25}, +Pages = {463--474}} + +@article{Korn2016, +Author = {Korn, P.}, +Title = {A Class of Mimetic Ocean Primitive Equation Models: The Hydrodynamical Kernel}, +Year = {2016}, +Journal ={}, +Pages = {(submitted)}} + +@article{Wang2016b, +Author = {Wang, Q. and Danilov, S. and Jung, T. and +Kaleschke, L. and Wernecke, A.}, +Year = {2016}, +Title ={{Sea ice leads in the Arctic Ocean: Model assessment, interannual variability and trends}}, +Journal = {Geophys. Res. Lett.}, +Volume = {43}, +Pages = {7019–7027}, +Doi = {10.1002/2016GL068696}} + + +@article{core_so1, +Title = {{An assessment of Southern Ocean water masses and sea ice during 1988–2007 in a suite of interannual CORE-II simulations}}, +Author = { +Downes, S. M. and Farneti, R. and Uotila, P. and Griffies, S. M. and +Marsland, S. J. and Bailey, David and Behrens, Erik and Bentsen, Mats and Bi, Daohua and +Biastoch, A. and B\"oning, Claus and Bozec, Alexandra and Canuto, Vittorio M. and Chassignet, Eric and Danabasoglu, Gokhan and Danilov, Sergey and Diansky, Nikolay and Drange, Helge and Fogli, Pier Giuseppe and Gusev, A. and Howard, Armando and Ilicak, Mehmet and Jung, Thomas and Kelley, Maxwell and Large, William G. and Leboissetier, Anthony and Long, Matthew and Lu, Jianhua and Masina, Simona and Mishra, Akhilesh and Navarra, Antonio and Nurser, A.J. George and Patara, Lavinia and Samuels, Bonita L. and Sidorenko, Dmitry and Spence, Paul and Tsujinou, Hiroyuki and Wang, Qiang and Yeager, Stephen G. }, +Journal = {Ocean Modelling}, +Volume = { 94}, +Year = {2015}, +Pages = {67--94}} + +@article{Stouffer2005, +Author = {Stouffer, R. J. and Broccoli, A. J. and Delworth, T. L. and Dixon, K, W. and Gudgel, R. and Held, I. and Hemler, R. and Knutson, T. and Lee, H.-C. and Schwarzkopf, M. D. and Soden, B. and Spelman, M. J. and Winton, M. 
and Zeng, F.}, +Title = {{GFDL's CM2 global coupled climate models. Part IV: idealized climate response}}, Journal = {J. Clim.}, +Volume = {19}, +Pages ={723--740}, +Year = {2005}} + +@article{Jochum2008, +Author = {Jochum, M. and Danabasoglu, G. and Holland, M. and Kwon, Y.-O. and Large, W. G.}, +Title = {Ocean viscosity and climate}, +Journal = {J. Geophys. Res.}, +Volume = {113}, +Pages = {C06017}, +Doi = {10.1029/2007JC004515}, +Year = {2008}} + + +@article{Engwirda2017, +title={High-order accurate finite-volume formulations for the pressure gradient force in layered ocean models}, +author={Engwirda, D. and Kelley, M. and Marshall, J.}, +journal={Ocean Modelling}, +volume={116}, +year={2017}, +pages={1--15} +} +@article{Adcroft2008, +title={A finite volume discretization of the pressure gradient force using analytic integration}, +author={Adcroft, A. and Hallberg, R. and Harrison, M.}, +journal={Ocean Modelling}, +volume={22}, +year={2008}, +pages={106--113} +} + +@article{Hallberg2009, +title={Reconciling estimates of the free surface height in {L}agrangian vertical coordinate ocean models with mode-split time stepping}, +author={Hallberg, R. and Adcroft, A.}, +journal={Ocean Modelling}, +volume={29}, +year={2009}, +pages={15--26} +} + +@article{Berntsen2011, +title={A perfectly balanced method for estimating the internal pressure gradients in r-coordinate ocean models}, +author={Berntsen, J.}, +journal={Ocean Modelling}, +volume={38}, +year={2011}, +pages={85--95} +} + +@article{Burchard2019, +title={Mixing Estimates for Estuaries}, +author={Burchard, H. and Lange, X and Klingbeil, K. and MacCready, P}, +doi={10.1175/JPO-D-18-0147.1}, +journal={J. Phys. Oceanogr.}, +volume={49}, +year={2019}, +pages={631--648} +} + + +@article{Schepetkin2011, +title={Accurate {B}oussinesq oceanic modeling with a practical, ‘‘{S}tiffened’’ {E}quation of {S}tate}, +author={Shchepetkin, A. F. and McWilliams, J. 
C.}, +journal={Ocean Modelling}, +volume={38}, +year={2011}, +pages={41--70} +} + +@article{Shchepetkin2003, +author={Shchepetkin, A.F. and McWilliams, J.C.}, +year={2003}, +title={A method for computing horizontal pressure-gradient force in an oceanic model with a non-aligned vertical coordinate}, journal={J. Geophys. Res.}, +volume={108}, +pages={3090--3124}, +doi={10.1029/ 2001JC001047} +} + +@article{Shchepetkin2005, +author={Shchepetkin, A.F. and McWilliams, J.C.}, +year={2005}, +title={The regional ocean modeling system: a split-explicit, free-surface, topography-following-coordinate oceanic model}, journal={Ocean Modeling}, +volume={9}, +pages={347--404}, +doi={10.1016/j.ocemod.2004.08.002} +} + +@article{Engwirda2016, +title={A {WENO}-type slope-limiter for a family of piecewise polynomial methods}, +author={Engwirda, D. and Kelley, M.}, +journal={arXiv:1606.08188v1}, +year={2016} +} + +@article{Lee2002, +title={Spurious Diapycnal Mixing of the Deep Waters in an Eddy-Permitting Global Ocean Model}, +author={Lee, M.-M. and Coward A. C. and Nurser, A. J. G}, +volume={32}, +year={2002}, +pages={1522--1535} +} + +@article{Megann2018, +title={Estimating the numerical diapycnal mixing in an eddy-permitting ocean model}, +author={Megann, A.}, +journal={Ocean Modelling}, +volume={121}, +year={2018}, +pages={19--33} +} + +@article{xu2018, +title={On Mapping the Diapycnal Water Mass Transformation of the Upper {N}orth {A}tlantic {O}cean}, +author={Xu, X. and Rhines, P. B. and Chassignet, E. P.}, +year={2018}, +journal={J. Phys. Oceanogr.}, +volume={48}, +pages={2233--2258} +} + +@book{FoxKemperMenemenlis2008, +author={Fox-Kemper, B. and Menemenlis, D.}, +year={2008}, +title={Can large eddy simulation techniques improve mesoscale rich ocean models?}, +series={Geoph. Monog. Series 177}, +pages={319--337}, +publisher={AGU} +} + +@article{Adcroft2019, +author={Adcroft, A. and Anderson, W. and Balaji, V. and Blanton, C. and Bushuk, M. and Dufour, C. O. and Dunne, J. P. 
and Griffies, S. M. and Hallberg, R. and Harrison, M. J. and Held, I. M. and Jansen, M. F. and John, J. G. and Krasting, J. P. and Langenhorst, A. R. and Legg, S. and Liang, Z. and McHugh, C. and Radhakrishnan, A. and Reichl, B. G. and Rosati, T. and Samuels, B. L. and Shao, A. and Stouffer, R. and Winton, M. and Wittenberg, A. T. and Xiang, B. and Zadeh, N. and Zhang, R.}, +year={2019}, +title={The {GFDL} global ocean and sea ice model {OM}4.0: {M}odel description and simulation features}, journal={Journal of Advances in Modeling Earth Systems}, +volume={11}, +pages={3167--3211}, +doi={https://doi.org/10.1029/2019MS001726} +} + +@article{DanilovKutsenko2019, +title={On the geometric origin of spurious waves in finite-volume discretizations of shallow water equations on triangular meshes}, +author={Danilov, S. and Kutsenko, A.}, +journal={J. Comput. Phys.}, +volume={398}, +year={2019}, +pages={108891}, +doi={https://doi.org/10.1016/j.jcp.2019.108891} +} diff --git a/docs/ocean_configuration/ocean_configuration.rst b/docs/ocean_configuration/ocean_configuration.rst new file mode 100644 index 000000000..314287ace --- /dev/null +++ b/docs/ocean_configuration/ocean_configuration.rst @@ -0,0 +1,105 @@ +.. _chap_ocean_configuration: + +Ocean configuration (namelist.oce) +********************************** + +Sections of the namelist +======================== + +Section &oce_dyn +"""""""""""""""" + +- **C_d=0.0025**, Bottom drag, nondimensional. +- **A_ver= 1.e-4**, Vertical viscosity, m^2/s +- **laplacian=.false.**, Use Laplacian viscosity +- **A_hor=0.**, Background horizontal viscosity +- **A_hor_max=0.**, Maximum viscosity allowed (to limit Smag and Leith contributions when they are too large +- **Div_c=.5**, Modified Leith viscosity, nondimensional, 0.3 -- 1.0 +- **Leith_c=.05**, The strength of the Leith viscosity. 
+- **tau_c= 0.**, Controls the strength of filters (1.5 and 0.2 for dt=1min and 15min, respectively)
+- **Smag_c=0.**, Smagorinsky viscosity, nondimensional, 0.1 --0.2
+- **biharmonic=.false.**, Use biharmonic viscosity.
+- **visc_option=5**, Option 2 is to use Laplacian+Leith+biharmonic background, Option 5 is to use easy backscatter.
+- **easy_bs_scale = 35.**, Area scaling, to be used with visc_option=5 (easy backscatter)
+- **easy_bs_return= 1.5**, Coefficient for returned sub-gridscale energy, to be used with visc_option=5 (easy backscatter)
+- **Abh0=0.**, Biharmonic viscosity, m^4/s
+- **scale_area=5.8e9**, Viscosity and diffusion are for an element with ``scale_area``.
+- **mom_adv=2**, 1=vector CV, p1 vel, 2=sca. CV, 3=vector inv.
+- **free_slip=.false.**, Switch for the free slip.
+- **i_vert_visc=.true.**
+- **w_split=.false.**
+- **w_exp_max=1.e-3**
+- **SPP=.false.**, Salt Plume Parameterization.
+- **Fer_GM=.true.**, Switch for the GM after Ferrari et al. 2010
+- **K_GM_max=3000.0**, Maximum GM thickness diffusivity (m2/s)
+- **K_GM_min=2.0**, Minimum GM thickness diffusivity (m2/s)
+- **K_GM_bvref=2**, def of bvref in ferreira scaling 0=srf,1=bot mld,2=mean over mld,3=weighted mean over mld
+- **K_GM_rampmax=40.0**, Resol >K_GM_rampmax[km] GM on
+- **K_GM_rampmin=30.0**, Resol 0,
+
+.. math::
+   \overline{T}^-_{kv}=T_{(k-1)v}-(2G^c+G^u)h_{(k-1)v}/6, \quad w_{kv}<0,
+
+where :math:`G^c=(T_{(k-1)v}-T_{kv})/(Z_{(k-1)v}-Z_{kv})` is the central gradient estimate and :math:`G^u=(T_{kv}-T_{(k+1)v})/(Z_{kv}-Z_{(k+1)v})` for positive and :math:`G^u=(T_{(k-2)v}-T_{(k-1)v})/(Z_{(k-2)v}-Z_{(k-1)v})` for negative :math:`w_{kv}`. Note that our estimates of gradients are based on values that are mean over control volume. So the estimates themselves are not very accurate. It is the combination (of central and upwind) values that is accurate.
+
+Using
+
+.. 
math::
+   2w_{kv}\overline{T}_{kv}=w_{kv}(\overline{T}^+_{kv}+\overline{T}^-_{kv})+(1-\gamma)|w_{kv}|(\overline{T}^+_{kv}-\overline{T}^-_{kv})
+
+will give a fourth-order scheme on a uniform mesh if :math:`\gamma=1`. A blended third-fourth order scheme follows for :math:`0\le\gamma<1`.
+
+Compact scheme (also the Parabolic Spline Method)
+=================================================
+
+We need scalar values at interfaces. An elegant way to find them is to use splines, requiring continuity of reconstruction and first derivatives at level locations. The result is
+
+.. math::
+   \overline{T}_{k+1}\frac{1}{h_k}+2\overline{T}_{k}\left(\frac{1}{h_k}+\frac{1}{h_{k-1}}\right)+\overline{T}_{k-1}\frac{1}{h_{k-1}}=3\left(T_k\frac{1}{h_k}+T_{k-1}\frac{1}{h_{k-1}}\right).
+
+The boundary conditions are those of natural spline, i.e.,
+
+.. math::
+   2\overline{T}_{1}+\overline{T}_{2}=3T_1,\quad 2\overline{T}_{N+1}+\overline{T}_{N}=3T_N.
+
+This method requires three-diagonal solve, which takes the same time as two vertical loops. The name `compact` reflects the fact that the equation above involves stencil of minimum size. It becomes the PSM method if used with semi-Lagrangian time stepping, as in PPM.
+
+The result is more accurate than PPM (see further). It is of the fourth order as PPM on uniform grid, but has a smaller residual term. Those who learned piecewise linear finite elements may see some analogies in the reconstruction procedure. ROMS uses this method for vertical advection of both tracers and momentum.
+
+Piecewise Parabolic Method
+--------------------------
+
+To be written
+
+
+FCT
+---
+
+The FCT limiter in FESOM2 uses the first-order upwind method as the low-order monotonic method and a combination of methods above as the high-order one. 
The low-order solution and the antidiffusive fluxes (the difference between the high-order and low-order fluxes) are assembled in one pass (in a cycle over edges for the horizontal part and over vertices for the vertical part). We experimented with separate pre-limiting of horizontal and vertical antidiffusive fluxes and found that commonly this leads to an increased dissipation, for the horizontal admissible bounds are in many cases too tight. For this reason, the computation of admissible bounds and limiting is three-dimensional. As a result, it will not necessarily fully eliminate non-monotonic behavior in the horizontal direction. The basic difference from the FCT algorithm used in FESOM1.4 is the construction of low-order solution. In FESOM1.4 the low-order solution is obtained by adding an artificial diffusion to the high-order right hand side. Using the FCT roughly doubles the cost of the transport algorithm, but makes the code more stable in practice.
+
+Vertical velocity splitting
+---------------------------
+
+As demonstrated in :cite:`Lemarie2015`, the strongest practical Courant number limitation is imposed by vertical advection in isolated patches adjacent to the coast. The code's numerical efficiency can be improved if measures are taken to stabilize it with respect to sporadic events with large vertical velocities. Unstructured meshes may even be more vulnerable to such events because mesh irregularity can easily provoke a noisy pattern in :math:`w` just on its own. FESOM offers the approach proposed by :cite:`Shchepetkin2015` according to which the vertical transport velocity is split into two contributions :math:`w=w_{ex}+w_{im}` where the first one is determined by the maximum admissible Courant number, and the second one takes the rest. The advection with :math:`w_{ex}` is done explicitly using schemes mentioned above. The advection with :math:`w_{im}` is implicit. It uses the first-order upwind (backward Euler in time). 
This method leads to an operator that is diagonally dominant. The implicit advective terms are added to the implicit vertical mixing terms and the resulting three-diagonal system of equations is solved with the standard sweep algorithm. Because of this, only very small additional costs are incurred if this algorithm is used. Although the first-order upwind scheme is dissipative, it is applied only in critical cases to excessively large velocities.
+
+Operator splitting
+------------------
+
+FESOM2 does not use operator splitting at present and takes the horizontal and vertical fluxes in a single step. However, from the viewpoint of increasing admissible time steps it is worthwhile to provide the implementation of advection in which tracers are updated separately for horizontal and vertical contributions. As is well known, the sequence horizontal-vertical should alternate with vertical-horizontal in this case. This work is planned, and this section will be updated in due course.
+
+GM and isoneutral operators
+===========================
+
+The eddy-induced transport
+--------------------------
+
+FESOM2 follows the algorithm proposed by :cite:`Ferrari2010` to implement the Gent-McWilliams (GM) parameterization :cite:`GentMcWilliams1990`, :cite:`Gent1995`. FESOM1.4 operates with skewsion (see :cite:`Griffiesbook` for mathematical detail). While working with skewsion is convenient in FESOM1.4 due to its variational formulation, it is less straightforward in FESOM2. Besides, the algorithm by :cite:`Ferrari2010` provides an explicit expression for the eddy bolus velocity streamfunction.
+
+The bolus velocity :math:`{\bf v}^*=({\bf u}^*,w^*)` is expressed in terms of eddy-induced streamfunction :math:`\boldsymbol{\Psi}`,
+
+.. math::
+   {\bf v}^*=\nabla_3\times\boldsymbol{\Psi}, \quad \boldsymbol{\Psi}=\boldsymbol{\gamma}\times{\bf k},
+
+where :math:`\boldsymbol{\gamma}` is a two-dimensional vector. In agreement with :cite:`Ferrari2010`, it is computed by solving
+
+.. 
math:: + (c^2\partial_{zz}-N^2)\boldsymbol{\gamma}=(g/\rho_0)\kappa\nabla_z\sigma + :label: eq_gm + +with boundary conditions :math:`\boldsymbol{\gamma}=0` at the surface and ocean bottom. In this expression, :math:`c` is the speed of the first baroclinic mode, :math:`\sigma` the isoneutral density, :math:`\kappa` the thickness diffusivity, :math:`N` the Brunt–Väisälä frequency, and the index :math:`z` means that the gradient is computed for fixed :math:`z` (it differs from the gradient along layers, :math:`\nabla_z\sigma=\nabla\sigma-\partial_z\sigma\nabla Z`). In terms of the vector :math:`\boldsymbol{\gamma}` the components of eddy-induced velocity are computed as + +.. math:: + {\bf u}^*=\partial_z\boldsymbol{\gamma}, \quad w^*=-\nabla\cdot\boldsymbol{\gamma}. + +It is easy to see that solving :eq:`eq_gm` plays a role of tapering, for the solution is a smooth function satisfying boundary conditions. +The residual velocity :math:`{\bf u}_r={\bf u}+{\bf u}^*`, :math:`w_r=w+w^*` which is the sum of the eddy-induced velocity and the mean velocity :math:`({\bf u},w)` is consistent with :math:`\overline h` because the vertically integrated divergence of :math:`{\bf u}^*` is zero. The inclusion of eddy-induced velocity implies that the thickness and tracer equations are now written for the residual velocity :math:`{\bf u}_r`. + +Although the natural placement for :math:`\boldsymbol{\gamma}` is at the cell centroids, it is moved to the mesh vertices in order to reduce the amount of computations. The vertical location is at full levels (layer interfaces). The horizontal bolus velocities are then computed at cell centroids as + +.. math:: + {\bf u}^*_{c}=(1/3) \partial_z \sum_{v(c)}\boldsymbol{\gamma}_{v}. + +The vertical bolus velocity :math:`w^*` is then found together with :math:`w` at the end of the ALE step and the full residual velocity is used to advect tracers. + +We compute the speed :math:`c` in the WKB approximation as + +.. 
math:: + c=\frac{1}{\pi}\int_{-H}^0Ndz. + +Among other factors, the magnitude of the thickness diffusivity :math:`\kappa` depends on the resolution :math:`r` and the local Rossby radius :math:`L_R=c/f`: + +.. math:: + \kappa=\kappa_0 f_{\kappa}(r/L_R), + +where :math:`f_{\kappa}` is a cut-off function that tends to 0 if :math:`r/L_R<1` and to 1 otherwise. The resolution is defined as a square root of the area of the scalar control volume. On general meshes it may exhibit substantial local variations, so smoothing over the neighbor vertices is done. + +Isoneutral diffusion +-------------------- + +Assuming that the slope of isopycnals is small, the diffusivity tensor can be written as + +.. math:: + {\bf K}= + \begin{pmatrix} K_i & 0 &s_xK_i \\ + 0 & K_i & s_yK_i\\ + s_xK_i & s_yK_i & s^2K_i+K_d + \end{pmatrix} + :label: eq_kiso + +Here :math:`K_i` and :math:`K_d` are the isoneutral and diapycnal diffusivities, and :math:`{\bf s}` is the isoneutral slope vector. Its derivatives are computed along layers, + +.. math:: + {\bf s}=(s_x,s_y)=-\nabla\sigma/\partial_z\sigma. + +If layer interfaces deviate substantially from geopotential surfaces, for example, if layers follow the bottom topography, the slope vector can be substantially larger than typically found on :math:`z`-coordinate meshes. Mixed derivatives in :math:`\nabla_3 h {\bf K}\nabla_3` operator in this case can limit the time step :cite:`Lemarie2012a`. To maintain stability, the term :math:`h\partial_z(s^2K_i+K_d )\partial_z` is treated implicitly, as suggested by :cite:`Lemarie2012a`. Appendix :math:`app:isoneutral` shows the details of the numerical discretization of isoneutral diffusion. + +Equation of state +----------------- + +FESOM still works with potential temperature. The conservative temperature and TEOS10 will be made available soon. The estimates of density by the equation of state are made columnwise. 
To facilitate these estimates, for each column the arrays are computed of quantities appearing with different powers in :math:`z`. Then they are combined to estimate the in-situ density and pressure as well as to compute the Brunt–Väisälä frequency in the same routine. \ No newline at end of file diff --git a/docs/subcycling_instead_solver.rst b/docs/subcycling_instead_solver.rst new file mode 100644 index 000000000..a46d53243 --- /dev/null +++ b/docs/subcycling_instead_solver.rst @@ -0,0 +1,58 @@ +.. _subcycling_instead_solver: + +Subcycling instead of solver +**************************** + +Semi-implicit treatment of external mode has a drawback of suboptimal parallel scalability in the limit of very small partitions. An alternative approach is to use split explicit time stepping when the external mode (elevation and vertically integrated velocity) are time stepped with a small step (subcycling), and then filtered to remove fast contributions. This option will be added in the future, and at present optimal algorithms are explored. The description of this section gives one possibility. Flux form of momentum advection is used. We take + +.. math:: + \eta^n=(\overline h^{n-1/2}+\overline h^{n+1/2})/2, + +since it provides the second-order accurate estimate. + +An easiest approach is to run subcycles between time levels :math:`n` and :math:`n+2`, with subsequent averaging to level :math:`n+1`. + +The contribution from the elevation :math:`\eta^n` is kept in the predicting :math:`\Delta \tilde{\bf U}` because it also incorporates the implicit solve for vertical viscosity. Then the compensation term with :math:`\eta^n` appears in :eq:`eq_barus` below. This can be avoided if implicit vertical viscosity substep is moved to the end of velocity step. + +Instead of :eq:`eq_baru` and :eq:`eq_etaU` we introduce subcycles indexed with :math:`j`, :math:`j=0:2J`, with :math:`\eta^{n+j/J}` shortcut to :math:`\eta^j` and same for :math:`\overline{\bf U}` in several formulas below. 
The simplest form of subcycling looks like
+
+.. math::
+   \eta^{j+1}-\eta^j=-(\nabla\cdot\overline{\bf U}^{j}+W^j)\tau/J.
+   :label: eq_etas
+
+.. math::
+   \overline{\bf U}^{j+1}-\overline{\bf U}^j=\overline{\Delta{\bf U}}/J-g(\tau/J)(H+\overline h^{n+1/2})\nabla(\eta^{j+1}-\eta^n).
+   :label: eq_barus
+
+This is a forward--backward scheme.
+
+Other forms of subcycling can be used to increase stability and reduce the number of subcycles :math:`2J+1`. Many of them are discussed by :cite:`Shchepetkin2005`. In particular, an AB3-AM4 scheme (see also :cite:`Lemarie2015`) is demonstrated to provide good accuracy and stability.
+
+On completing subcycles one is at time level :math:`n+2`. In order to eliminate possible high frequencies, averaging is done to time level :math:`n+1`:
+
+.. math::
+   \overline{\bf U}^{n+1}=(2J+1)^{-1}\sum_j\overline{\bf U}^j,\quad \eta^{n+1}=(2J+1)^{-1}\sum_j\eta^j.
+
+The common further action is to use :math:`\overline{\bf U}^{n+1}` for the barotropic transport combined with the baroclinic transport diagnosed from :math:`{\bf U}^{n+1}`. We introduce first the new baroclinic transport by writing
+
+.. math::
+   {\bf U}^*_k={\bf U}^n_k+\Delta{\bf U}_k,
+
+.. math::
+   \tilde{\bf U}^{n+1}_k={\bf U}^*_k
+   -\overline{{\bf U}}^*\frac{h^{n+1}_k}{H+\eta^{n+1}}.
+
+It is then updated to the full transport velocity by
+
+.. math::
+   {\bf U}^{n+1}_k=\tilde{\bf U}^{n+1}_k+\overline{{\bf U}}^{n+1}\frac{h^{n+1}_k}{H+\eta^{n+1}}.
+
+Here :math:`h_k^{n+1}` is an estimate of layer thickness at time step :math:`n+1`.
+
+A recent suggestion is to replace the time stepping in :eq:`eq_etas`-:eq:`eq_barus` by a dissipative one modifying :eq:`eq_barus` as
+
+.. math::
+   \overline{\bf U}^{j+1}-\overline{\bf U}^j=\overline{\Delta{\bf U}}/J-g(\tau/J)(H+\overline h^{n+1/2})\nabla((1+\lambda)\eta^{j+1}-\lambda \eta^{j}-\eta^n). 
+ :label: eq_barusm + +The parameter :math:`0\le \lambda<1` controls the dissipation which alone can be sufficient to remove the high-frequency component in :math:`\overline{\bf U}` and :math:`\eta`. It remains to be seen whether this is sufficient to fully eliminate averaging and shorten integration just to :math:`n+1` instead of :math:`n+2`. \ No newline at end of file diff --git a/docs/temporal_discretization.rst b/docs/temporal_discretization.rst new file mode 100644 index 000000000..7283ab0c7 --- /dev/null +++ b/docs/temporal_discretization.rst @@ -0,0 +1,193 @@ +.. _temporal_discretization: + +Temporal discretization: Asynchronous time stepping +*************************************************** + +FESOM2 uses asynchronous time stepping which means that velocity and scalar fields are shifted by a half time step. It is assumed that velocity is known at integer time steps and scalars at half-integer time steps. The time index is denoted by :math:`n`. The advantage of this simple arrangement is that the discrete velocity is centered in time for the time step of scalar quantities, and pressure gradient is centered for the velocity time step. All other terms in equations require some interpolations to warrant second-order accuracy in time, and routinely the second-order Adams-Bashforth estimate is used in FESOM2. Other variants of time stepping are explored, and will be added in future. + +Thickness and tracers +===================== + +We advance thickness and scalar quantities as + +.. math:: + h^{n+1/2}-h^{n-1/2}=-\tau[\nabla\cdot({\bf u}^nh^{*})+w^t-w^b+ W^{n-1/2}\delta_{k1}] + :label: eq_ht + +and + +.. math:: + h^{n+1/2}T^{n+1/2}-h^{n-1/2}T^{n-1/2}=-\tau[\nabla\cdot({\bf u}^nh^{*}T^n)+w^tT^t-w^bT^b+W^{n-1/2}T_W\delta_{k1}]+ D_T. + :label: eq_tracert + +Here :math:`\tau` is the time step and :math:`D_T` stands for the terms related to diffusion. The time index on :math:`w` is not written, for :math:`w` is diagnosed by :math:`{\bf u}` and :math:`h`. + +.. 
note:: + Note that :math:`\mathbf{u}` and :math:`h` enter similarly in the equations for thickness and tracer. This warrants consistency: if :math:`T=\rm{const}`, the tracer equation reduces to the thickness equation. + +Since the horizontal velocity is centered in time, these equations will be of the second order for the advective terms if :math:`h^*` is centered (:math:`h^*=h^n`) or if any other high-order estimate is used. The treatment of :math:`h^*` depends on options. At present FESOM allows only the simplest choices when :math:`h` vary only slightly with respect to unperturbed thickness and :math:`\partial_th` is prescribed by the evolution of :math:`\eta`. We take :math:`h^*=h^{n-1/2}` in this case because this choice can easily be made consistent with elevation, see further. + +Although this formally reduces the time order to the first, the elevation is usually computed with the accuracy shifted to the first-order in large-scale ocean models, including this one. + +.. note:: + Other options, including those allowing :math:`h` to follow isopycnal dynamic will be gradually added to FESOM. They will differ in the way how :math:`h^*` is computed. + + +Elevation +========= + +We introduce + +.. math:: + \overline{h}=\sum_k h_k-H, + +where :math:`H` is the unperturbed ocean thickness. :math:`\overline h` would be identical to the elevation :math:`\eta` in the continuous world, but not in the discrete formulation here. +The difference between these two quantities is that the elevation is defined at integer time levels. More importantly, it has to be computed so that the fast external mode is filtered. FESOM2 uses implicit method for that. The point is to make :math:`\overline h` and :math:`\eta` fully consistent. + +In order to filter the external mode :math:`\eta` is advanced implicitly in time. For :math:`h^*=h^{n-1/2}` we write for the elevation + +.. 
math:: + \eta^{n+1}-\eta^n=-\tau(\alpha(\nabla\cdot\sum_k{h}_k^{n+1/2}{\bf u}_k^{n+1}+W^{n+1/2})+(1-\alpha)(\nabla\cdot\sum_k{h}_k^{n-1/2}{\bf u}_k^{n}+W^{n-1/2})). + :label: eq_etat + +Here :math:`\alpha` is the implicitness parameter (:math:`0.5\le\alpha\le1`) in the continuity equation. Note that the velocities at different time steps are taken with their respective thicknesses in the same way as they appear in the thickness equation :eq:`eq_ht`. The approach below is inspired by :cite:`Campin2004`. The equation for thicknesses can be vertically integrated giving, under the condition that the surface value of :math:`w^t` vanishes, + +.. math:: + \overline{h}^{n+1/2}-\overline{h}^{n-1/2}=-\tau\nabla\cdot\sum_k{h}_k^{n-1/2}{\bf u}_k^n-\tau W^{n-1/2}. + :label: eq_hbar + +Expressing the rhs in the formula for :math:`\eta` :eq:`eq_etat` through the difference in surface displacements :math:`\overline{h}` from the last formula we see that :math:`\eta` and :math:`\overline{h}` can be made consistent if we require + +.. math:: + \eta^n=\alpha \overline{h}^{n+1/2}+(1-\alpha)\overline{h}^{n-1/2}. + :label: eq_etan + + +To eliminate the possibility for :math:`\eta` and :math:`\overline{h}` to diverge, we compute :math:`\eta^n` from the last formula, then estimate :math:`\eta^{n+1}` by solving dynamical equations (equation :eq:`eq_etat` requires :math:`\mathbf{u}^{n+1}`, so it is solved simultaneously with the momentum equation), and use it only to compute :math:`{\bf u}^{n+1}`. On the new time step a 'copy' of :math:`\eta^{n+1}` will be created from the respective fields :math:`\overline{h}`. +We commonly select :math:`\alpha=1/2`, in this case :math:`\eta^n` is just the interpolation between the two adjacent values of :math:`\overline{h}`. Note that :eq:`eq_etan` will be valid for any estimate :math:`h^*` as far as it is used consistently in the product with the horizontal velocity. + +.. 
note:: + The implicit way of solving for :math:`\eta` means that FESOM uses an iterative solver at this step. Such solvers are thought to be a major bottleneck in massively parallel applications. This is the reason why many groups abandon solvers and go for subcycling+filtering algorithms for the treatment of external (approximately barotropic) dynamics. Such an option will be added to FESOM. Its elementary variant is described in the Appendix. We work on more advanced variants. + +Momentum equation +================= + +The momentum equation has to be solved together with the elevation equation :eq:`eq_etat`, which is done with a predictor-corrector method. The method is largely an adaptation of the pressure correction algorithm from computational fluid dynamics. + +Assuming the forms :eq:`eq_mom_vei` or :eq:`eq_mom_f2` we write (using :math:`\partial_z` for brevity instead of the top-bottom differences) + +.. math:: + {\bf u}^{n+1}-{\bf u}^{n}=\tau({\bf R}^{n+1/2}_u+\partial_z\nu_v\partial_z{\bf u}^{n+1}-g\nabla(\theta\eta^{n+1}+(1-\theta)\eta^n)). + +Here :math:`\theta` is the implicitness parameter for the elevation, :math:`{\bf R}^{n+1/2}_u` includes all the terms except for vertical viscosity and the contribution from the elevation, which are treated implicitly. To compute :math:`{\bf R}^{n+1/2}_u`, we use the second-order Adams-Bashforth (AB) method for the terms related to the momentum advection and Coriolis acceleration. The AB estimate of quantity :math:`q` is + +.. math:: + q^{AB}=(3/2+\epsilon)q^n-(1/2+\epsilon)q^{n-1}. + +Here :math:`\epsilon` is a small parameter (:math:`\le0.1`) needed to ensure stability in the case of advection operators. The contribution of pressure :math:`P` does not need the AB interpolation (because it is centered). The horizontal viscosity is estimated on the level :math:`n` because this term is commonly selected for numerical, not physical, reasons. + +- We write the predictor equation + + .. 
math:: + {\bf u}^{*}-{\bf u}^{n}-\tau\partial_z\nu_v\partial_z({\bf u}^{*}-{\bf u}^n)=\tau({\bf R}^{n+1/2}_u+\partial_z\nu_v\partial_z{\bf u}^{n}-g\nabla\eta^n). + :label: eq_predict + + The operator on the lhs connects three vertical levels, leading to three-diagonal linear problem for :math:`\Delta {\bf u}=\mathbf{u}_k^*-\mathbf{u}_k^n` for each vertical column. Solving it we find the predicted velocity update :math:`\Delta {\bf u}`. (The vertical viscosity contribution on the rhs is added during the assembly of the operator on the lhs.) + + +- The corrector step is written as + + .. math:: + {\bf u}^{n+1}-{\bf u}^{*}=-g\tau\theta\nabla(\eta^{n+1}-\eta^n). + :label: eq_cor + +- Expressing the new velocity from the last equation and substituting it into the equation for the elevation :eq:`eq_etat`, we find + + .. math:: + \frac{1}{\tau}(\eta^{n+1}-\eta^n)-\alpha\theta g\tau\nabla\cdot(\overline{h}^{n+1/2}+H)\nabla(\eta^{n+1}-\eta^n)dz= \nonumber \\ + -\alpha(\nabla\cdot\sum_k{h}_k^{n+1/2}({\bf u}^n+\Delta{\bf u})_k+W^{n+1/2})-(1-\alpha)(\nabla\cdot\sum{h}_k^{n-1/2}{\bf u}_k^n+W^{n-1/2}). + :label: eq_etaa + + Here, the operator part depends on :math:`h^{n+1/2}`, which is the current value of thickness. The last term on the rhs is taken from the thickness computations on the previous time step. + +The overall solution strategy is as follows. + +- Compute :math:`\eta^n` from :eq:`eq_etan`. Once it is known, compute :math:`\Delta {\bf u}` from :eq:`eq_predict`. + +- Update the matrix of the operator on the lhs of :eq:`eq_etaa`. Solve :eq:`eq_etaa` for :math:`\eta^{n+1}-\eta^n` using an iterative solver and estimate the new horizontal velocity from :eq:`eq_cor`. + +- Compute :math:`\overline{h}^{n+3/2}` from :eq:`eq_hbar`. + +- Determine layer thicknesses and :math:`w` according to the options described below. + +- Advance the tracers. The implementation of implicit vertical diffusion will be detailed below. 
+ +Momentum equation in form :eq:`eq_mom_fl` +========================================= + +Here an additional difficulty is the presence of :math:`h` in the time derivative and on the rhs. The rule is that :math:`\mathbf{u}` should appear with the same :math:`h^*` as in the thickness or tracer equation. We used thus far the choice :math:`h^*\mathbf{u}^n=h^{n-1/2}\mathbf{u}^n` in these equations, which implies that the time derivative will be + +.. math:: + \partial_t(\mathbf{u}h)=( h^{n+1/2}\mathbf{u}^{n+1}-h^{n-1/2}\mathbf{u}^n)/\tau, + +:math:`h^{n-1/2}` will be used on the rhs with pressure gradient term, and the predictor equation will be written for :math:`h^{n-1/2}\Delta\mathbf{u}`. In this case :math:`h^{n-1/2}` can be factored out of the lhs, which will make predictor solution similar. The corrector step will be modified to + +.. math:: + h^{n+1/2}{\bf u}^{n+1}-h^{n-1/2}{\bf u}^{*}=-gh^{n-1/2}\tau\theta\nabla(\eta^{n+1}-\eta^n). + :label: eq_corf1 + + +It will lead to the replacement of :math:`h^{n+1/2}` in the lhs of :eq:`eq_etaa` by :math:`h^{n-1/2}`. We stress once again that the expressions in this section are for the particular choice of :math:`h^*`. + +Current options for the vertical coordinate +=========================================== + +The following options for the vertical coordinate are available at present: + +- Linear free surface: If we keep the layer thicknesses fixed, the time derivative in :eq:`eq_ht` drops out, and it becomes the standard equation to compute :math:`w`, starting from the bottom and continuing to the top, + + + .. math:: + w^t-w^b+\nabla\cdot({h\bf u})=0. + + + If this option is applied also to the first layer, the freshwater flux cannot be taken into account in the thickness equation. Its contribution to the salinity equation is added through a virtual salinity flux. In this option, :math:`w` at the (fixed) ocean surface differs from zero, and so do the tracer fluxes. 
They do not necessarily integrate to zero over the ocean surface which is why tracer conservation is violated. + +- Full (nonlinear) free surface: We adjust the thickness of the upper layer, while the thicknesses of all other layers are kept fixed, :math:`\partial_th_k=0` for :math:`k>1`. The thickness equations are used to compute :math:`w` on levels :math:`k=2:K_v` starting from the bottom. The change in the thickness of the first layer :math:`h^{n+3/2}_1-h^{n+1/2}_1` is given by :eq:`eq_hbar` written for the respective time interval. In this case there is no transport through the upper moving surface (the transport velocity :math:`w_1` is identically zero). This option requires minimum adjustment with respect to the linear free surface. However, the matrix of the operator in :eq:`eq_etaa` needs to be re-assembled on each time step. + +- We can distribute the total change in height :math:`\partial_t\overline h` between several or all eligible layers. Due to our implementation, at *each* scalar horizontal location they can only be the layers that do not touch the bottom topography. If all eligible layers are involved we estimate + + + .. math:: + \partial_t h_k=(h_k^0/\tilde H)\partial_t\overline h, + + + where :math:`h_k^0` are the unperturbed layer thicknesses and :math:`\tilde H` is their sum for all eligible layers. The thickness of the layers adjacent to the topography is kept fixed. The equation on thickness, written for each layer, is used to compute transport velocities :math:`w` starting from zero bottom value. This variant gives the so-called :math:`z^*`-coordinate. + +- Additional options will be gradually added. Layer thicknesses can vary in many ways provided that their tendencies sum to :math:`\partial_t\overline h` over the layers. In particular, requiring that transport velocities :math:`w` are zero, isopycnal layers can be introduced. 
The levels can move with high-pass vertical velocities, leading to the so-called :math:`\tilde z` coordinate, see :cite:`Leclair2011`, :cite:`Petersen2015`, or follow density gradients as in :cite:`Hofmeister2010`. The unperturbed layer thicknesses need not follow the geopotential surfaces and can be terrain-following, for example. + +- The ALE vertical coordinate is only a framework where many options are in principle possible. Additional measures may be required in each particular case, such as computations of pressure gradients with reduced errors. Updated transport algorithms may be needed (in the spirit of :cite:`Lemarie2012b`) to minimize spurious numerical mixing in terrain-following layers. These generalizations are among the topics of ongoing work. + + +Implicit vertical diffusion +=========================== + +We return to the tracer equation :eq:`eq_tracert`. The vertical diffusion in this equation may present a CFL limitation and is treated implicitly. + +Because of varying layer thicknesses, the implementation of implicit vertical diffusion needs slight adjustment compared to the common case of fixed layers. We write, considering time levels :math:`n-1/2` and :math:`n+1/2`, + +.. math:: + h^{n+1/2}T^{n+1/2}-h^{n-1/2}T^{n-1/2}=\tau(R_T^{n}+(K_{33}\partial_zT^{n+1/2})^t-(K_{33}\partial_zT^{n+1/2})^b) + + +and split it into + +.. math:: + h^{n+1/2}T^{*}-h^{n-1/2}T^{n-1/2}=\tau R_T^{n} + +and + +.. math:: + h^{n+1/2}(T^{n+1/2}-T^{*})=\tau(K_{33}\partial_z(T^{n+1/2}-T^*)+K_{33}\partial_zT^*)|^t_b. + +Here :math:`R_T` contains all advection terms and the terms due to the diffusion tensor except for the diagonal term with :math:`K_{33}`. The preliminary computation of :math:`T^*` is necessary to guarantee that a uniform tracer distribution stays uniform (some significant digits will be lost otherwise). 
diff --git a/docs/time_stepping_transport.rst b/docs/time_stepping_transport.rst new file mode 100644 index 000000000..fd228702c --- /dev/null +++ b/docs/time_stepping_transport.rst @@ -0,0 +1,72 @@ +.. _time_stepping_transport: + +Time stepping for the transport :math:`\mathbf{U}=h\mathbf{u}` instead of velocity +********************************************************************************** + +For the momentum equation in the form :eq:`eq_mom_fl` an alternative variant of time stepping is possible when the quantity :math:`\mathbf{U}=h\mathbf{u}` is advanced instead of velocity :math:`\mathbf{u}`. This will simultaneously imply that :math:`h^*=h^{n}`, making the thickness and transport equations centered with respect to the time step. The thickness appearing with the pressure gradient should be then :math:`h^{n+1/2}`, which provides a centered estimate. The advection and Coriolis terms are computed through AB2 (or AB3) time stepping, and if needed, the Coriolis term can be time stepped semi-implicitly. + +The time stepping algorithm can be formulated as follows + +.. math:: + {\bf U}^{n+1}-{\bf U}^{n}=\tau({\bf R}_{U}^{n+1/2}-gh^{n+1/2}\nabla(\theta\eta^{n+1}+(1-\theta)\eta^n)+(\nu_v\partial_z{\bf u}^{n+1})^t-(\nu_v\partial_z{\bf u}^{n+1})^b) + +with + +.. math:: + {\bf R}_{U}^{n+1/2}=({\bf R}_{U}^*)^{AB}-h^{n+1/2}(\nabla p_h+g\rho\nabla Z)/\rho_0, + +and + +.. math:: + {\bf R}_{U}^*=-\nabla\cdot({\bf U}^n{\bf u}^n)-(w^t{\bf u}^t-w^b{\bf u}^b)^n-f{\bf k}\times{\bf U}^n. + +The last expression combines the terms that need the AB method for stability and the second order. We use :math:`h^{n+1/2}` to compute :math:`Z` and follow the same rule as :eq:`eq_etan` to compute :math:`\eta^n`. The steps are: + +- Do the predictor step and compute :math:`\Delta \tilde{\bf U}=\tau{\bf R}_U^{n+1/2}-\tau gh^{n+1/2}\nabla\eta^n`. + +- Update for implicit viscosity. + + .. 
math:: + \partial_t\Delta{\bf U}-(\nu_v\partial_z(\Delta{\bf U}/h^{n+1/2}))|^t_b=\Delta\tilde{\bf U}+(\nu_v\partial_z({\bf U}^n/h^{n+1/2}))|^t_b. + +- Solve for new elevation. We write first + + .. math:: + \overline{\bf U}=\sum_k{\bf U}, + + and similarly for other quantities, getting + + .. math:: + \overline{\bf U}^{n+1}-\overline{\bf U}^n=\overline{\Delta{\bf U}}-g\tau(H+\overline h^{n+1/2})\theta\nabla(\eta^{n+1}-\eta^n) + :label: eq_baru + +and + + .. math:: + \eta^{n+1}-\eta^n=-\tau\nabla\cdot(\alpha\overline{\bf U}^{n+1}+(1-\alpha)\overline{\bf U}^{n})-\tau(\alpha W^{n+1/2}+(1-\alpha)W^{n-1/2}). + :label: eq_etaU + +Eliminating :math:`\overline{\bf U}^{n+1}` between these two equations, one gets the equation on elevation increment :math:`\Delta\eta=\eta^{n+1}-\eta^n` + + .. math:: + \Delta\eta-g\tau^2\theta\alpha\nabla\cdot((H+\overline h^{n+1/2})\nabla\Delta\eta)=-\tau\nabla\cdot(\alpha\overline{\Delta{\bf U}}+\overline{\bf U}^n)-\tau(\alpha W^{n+1/2}+(1-\alpha)W^{n-1/2}) + + In reality, everything remains similar to the vector-invariant case, and the matrix to be inverted is the same. + +- Correct the transport velocities as + + .. math:: + {\bf U}^{n+1}-{\bf U}^n={\Delta{\bf U}}-g\tau h^{n+1/2}\theta\nabla\Delta\eta. + :label: eq_corrU + +- Proceed with ALE and determine :math:`w^{n+1}`, :math:`h^{n+3/2}`, :math:`T^{n+3/2}`. + +- The new velocities are estimated as + + .. math:: + {\bf u}^{n+1}={\bf U}^{n+1}/h^{n+1}. + + Here :math:`h^{n+1}` can be computed either in the agreement with the ALE procedure (:math:`\eta^{n+1}` is already known) or interpolating between :math:`n+1/2` and :math:`n+3/2` time levels. + + +This alternative form of time stepping is more elegant. The horizontal velocity appears in most places in the layer equations as the product with respective thickness, and the alternative form takes this naturally into account. It will be added in due time together with the development of ALE options. 
\ No newline at end of file diff --git a/docs/vertical_discretization.rst b/docs/vertical_discretization.rst new file mode 100644 index 000000000..4f79d1aa2 --- /dev/null +++ b/docs/vertical_discretization.rst @@ -0,0 +1,88 @@ +.. _vertical_discretization: + +Vertical discretization: Layer thicknesses and layer equations +************************************************************** + +FESOM2 uses an Arbitrary Lagrangian Eulerian (ALE) vertical coordinate. This implies that level surfaces are allowed to move. The ALE vertical coordinate on its own is only the framework enabling moving level surfaces. The way they move depends on one's particular goal and may require additional algorithmic steps. Two limiting cases obviously include the case of fixed :math:`z`-levels and the case when levels are isopycnal surfaces. At present only vertical coordinates that slightly deviate from :math:`z`-surfaces are supported in FESOM, but many other options will follow. + +The implementation of the ALE vertical coordinate in FESOM2 basically follows :cite:`Ringler2013`. An alternative approach, used by MOM6, see, e.g., :cite:`Adcroft_Hallberg_2006` :cite:`Adcroft2019`, is in the exploratory phase. + +The essential step toward the ALE vertical coordinate lies in confining the equations of section :ref:`sec_cequations` to model layers. + +- Introduce layer thicknesses :math:`h_k=h_k(x,y,t)`, where :math:`k=1:K` is the layer index and :math:`K` the total number of layers. They are functions of the horizontal coordinates and time in a general case. Each layer consists of prisms defined by the surface mesh but partly masked by bottom topography. + +- Layers communicate via the transport velocities :math:`w_{kv}` through the top and bottom boundaries of the prisms. The transport velocities are the differences between the physical velocities in the direction normal to the layer interfaces and the velocities due to the motion of the interfaces. 
These velocities are defined at the interfaces (the yellow points in :numref:`vertical`). For layer :math:`k` the top interface has index :math:`k` and the bottom one is :math:`k+1`. Note that :math:`w_{kv}` coincides with the vertical velocity only if the level surfaces are flat. + +- All other quantities - horizontal velocities :math:`{\bf u}`, temperature :math:`T`, salinity :math:`S` and pressure :math:`p` are defined at mid-layers. Their depths will be denoted as :math:`Z_k`, and the notation :math:`z_k` is kept for the depths of mesh levels (the layer interfaces). They are functions of horizontal coordinates and time in a general case. + +The equations of motion, continuity and tracer balance are integrated vertically over the layers. We will use :math:`T` as a representative of an arbitrary tracer. + + +- The continuity equation becomes the equation on layer thicknesses + +.. math:: + \partial_t h_k+\nabla\cdot({\bf u}h)_k+(w^{t}-w^b)_k+W\delta_{k1}=0, + :label: eq_thickness + + +- and the tracer equation becomes + +.. math:: + \partial_t(hT)_k+\nabla\cdot({\bf u}hT)_k+(w^{t}T^t-w^bT^b)_k+WT_W\delta_{k1}=\nabla\cdot h_k{\bf K}\nabla T_k. + :label: eq_tracer + + +Here, :math:`W` is the water flux leaving the ocean at the surface, it contributes to the first layer only (hence the delta-function); :math:`T_W` is the property transported with the surface water flux and the indices :math:`t` and :math:`b` imply the top and the bottom of the layer. + +The right hand side of :eq:`eq_tracer` contains the 3 by 3 diffusivity tensor :math:`{\bf K}`. We still use :math:`\nabla` in :eq:`eq_tracer` for the 3D divergence (the outer :math:`\nabla`) for brevity, but assume the discrete form :math:`\nabla_h(...)+((...)^t-(...)^b)/h_k`, where :math:`(...)` are the placeholders for the horizontal and vertical components of 3D vector it acts on. A correct discretization of the diffusivity term is cumbersome and will be explained below. 
+ +- Vertical sum of :eq:`eq_thickness` over layers with account that :math:`w^t=0` at the free surface and :math:`w_b=0` at the bottom gives the 'layered' version of the elevation equation + + .. math:: + \partial_t\eta+\nabla_h\cdot\sum_kh_k{\bf u}_k+W=0. + :label: eq_eta + +- The layer-integrated momentum equation in the flux form is + + .. math:: + \partial_t(h{\bf u})+\nabla_h\cdot(h{\bf u u})+w^t{\bf u}^t-w^b{\bf u}^b+ + f{\bf k}\times{\bf u}h +h(\nabla_h p+g\rho\nabla Z)/\rho_0= \nonumber \\ D_{uh}{\bf u}+(\nu_v\partial_z{\bf u})^t-(\nu_v\partial_z{\bf u})^b, + :label: eq_mom_fl + + with :math:`D_{uh}{\bf u}` the horizontal viscosity operator for the flux form (to be specified later), :math:`\nu_v` the vertical viscosity coefficient, :math:`f` the Coriolis parameter and :math:`{\bf k}` a unit vertical vector. We ignore the momentum source due to the added water :math:`W` at the surface. Note that it could be more natural to formulate the solution procedure in terms of the horizontal layer transport velocities :math:`{\bf U}=h{\bf u}` in this case, but the present implementation in FESOM deals with separate :math:`h` and :math:`\mathbf{u}`. + +- The pressure field is expressed as + + .. math:: + p=g\rho_0\eta+P, \quad P_{1}=p_a+g\rho_1h_1/2, \quad P_k=P_{k-1}+g(\rho_{k-1}h_{k-1}+ \rho_kh_k)/2. + :label: eq_pressure + + with :math:`p_a` the atmospheric pressure, :math:`\rho` the deviation of density from its reference value :math:`\rho_0`, and :math:`P` is the variable hydrostatic pressure due to :math:`\rho`. The pressure gradient in continuous equations :eq:`eq_cmom` has to be computed at constant :math:`z`. The model levels deviate from surfaces :math:`z=\rm{const}`. The term :math:`g\rho\nabla Z`, appearing together with the horizontal pressure gradient in :eq:`eq_mom_fl` compensates for the deviation. The quantity :math:`Z` appearing in this term is the :math:`z`-coordinate of the midplane of the layer with the thickness :math:`h`. + +.. 
note:: + Although :math:`\nabla p+g\rho\nabla Z` gives a formally correct estimate of pressure gradient at constant :math:`z`, the errors of discretization of the two terms in this expression become an issue if level surfaces deviate from :math:`z`-surfaces. They are known as pressure gradient errors and require special care. FESOM will propose a selection of known algorithms, including the finite-volume algorithms of pressure gradient force that follows :cite:`Engwirda2017` but is adapted to the triangular prisms of FESOM mesh. + +- Instead of using the flux form of momentum equation :eq:`eq_mom_fl` representing momentum balance in the layer one can work without layer integration. Of particular interest is the vector-invariant form written as + + .. math:: + \partial_t{\bf u}+\frac{\omega+f}{h}{\bf k}\times{\bf u}h+((w\partial_z{\bf u})^t+(w\partial_z{\bf u})^b)/2 +\nabla (p/\rho_0+{\bf u}^2/2)+g\rho\nabla Z/\rho_0= \nonumber \\ D_u{\bf u}+((\nu_v\partial_z{\bf u})^t-(\nu_v\partial_z{\bf u})^b)/h. + :label: eq_mom_vei + + Here, the identity + + .. math:: + {\bf u}\cdot\nabla{\bf u}=\omega{\bf k}\times{\bf u}+\nabla({\bf u}^2/2),\quad \omega={\bf k}\cdot(\nabla\times{\bf u}) + + was used. + +- The second term on the lhs of :eq:`eq_mom_vei` includes division and multiplication with the layer thickness, and in doing so, it introduces the layer potential vorticity (PV), :math:`q=(\omega+f)/h` and its transport :math:`{\bf u}h`. The layer thickness formally drops out from the equation :eq:`eq_mom_vei` which is still continuous in the horizontal direction. However, in the discrete case, the location of vorticity points (vertices) and velocity points is different. By keeping separate :math:`h` the equation will then operate on the same horizontal transports as the thickness equations. This is the prerequisite for developing discretizations that conserve potential vorticity. + +- One more form is possible where the vector-invariant representation is not used + + .. 
math:: + \partial_t({\bf u})+\nabla\cdot({\bf u u})+(w^t{\bf u}^t-w^b{\bf u}^b)/h+ + f{\bf k}\times{\bf u} +(\nabla p+g\rho\nabla Z)/\rho_0= \nonumber \\ D_{u}{\bf u}+(A_v\partial_z{\bf u})^t-(A_v\partial_z{\bf u})^b/h. + :label: eq_mom_f2 + +The default version in FESOM2 is :eq:`eq_mom_fl`. Although the versions are derived from the same continuous equations, they are not equivalent in the discrete case. + diff --git a/docs/zreferences.rst b/docs/zreferences.rst new file mode 100644 index 000000000..c22d33306 --- /dev/null +++ b/docs/zreferences.rst @@ -0,0 +1,11 @@ +.. _references: + +.. only:: html + + References + ********** + + .. rubric:: References + +.. bibliography:: mybib_fesom2.bib + diff --git a/dwarf/dwarf_ice/CMakeLists.txt b/dwarf/dwarf_ice/CMakeLists.txt new file mode 100644 index 000000000..4feed315f --- /dev/null +++ b/dwarf/dwarf_ice/CMakeLists.txt @@ -0,0 +1,17 @@ +cmake_minimum_required(VERSION 3.9) + +# set default build type cache entry (do so before project(...) is called, which would create this cache entry on its own) +if(NOT CMAKE_BUILD_TYPE) + message(STATUS "setting default build type: Release") + set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.") +endif() + +project(FESOM2.0) +set(TOPLEVEL_DIR ${CMAKE_CURRENT_LIST_DIR}) +set(FESOM_COUPLED OFF CACHE BOOL "compile fesom standalone or with oasis support (i.e. coupled)") +set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. 
(Also needs FESOM_COUPLED to work)") +set(CRAY OFF CACHE BOOL "compile with cray ftn") +set(USE_ICEPACK OFF CACHE BOOL "compile fesom with the Iceapck modules for sea ice column physics.") +#set(VERBOSE OFF CACHE BOOL "toggle debug output") +#add_subdirectory(oasis3-mct/lib/psmile) +add_subdirectory(src) diff --git a/dwarf/dwarf_ice/README.md b/dwarf/dwarf_ice/README.md new file mode 100644 index 000000000..4dcde298b --- /dev/null +++ b/dwarf/dwarf_ice/README.md @@ -0,0 +1,8 @@ +1st.: link necessary files for dwarf + ./dwarf_linkfiles.sh + +2nd.: compile dwarf + ./configure.sh + +3rd.: run dwarf model from work/ + \ No newline at end of file diff --git a/dwarf/dwarf_ice/dwarf_ini/CMakeLists.txt b/dwarf/dwarf_ice/dwarf_ini/CMakeLists.txt new file mode 100644 index 000000000..c7cedc905 --- /dev/null +++ b/dwarf/dwarf_ice/dwarf_ini/CMakeLists.txt @@ -0,0 +1,98 @@ +cmake_minimum_required(VERSION 3.4) + +project(fesom Fortran) + +option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) + +# get our source files +set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt +#if(${USE_ICEPACK}) +# file(GLOB sources_Fortran ${src_home}/*.F90 +# ${src_home}/icepack_drivers/*.F90 +# ${src_home}/icepack_drivers/Icepack/columnphysics/*.F90) +#else() +file(GLOB sources_Fortran ${src_home}/*.F90) +#endif() +#list(REMOVE_ITEM sources_Fortran ${src_home}/fesom_partition_init.F90) +#file(GLOB sources_C ${src_home}/*.c) + +# generate a custom file from fesom_version_info.F90 which includes the current git SHA +#set(FESOM_ORIGINAL_VERSION_FILE ${src_home}/fesom_version_info.F90) +#set(FESOM_GENERATED_VERSION_FILE ${CMAKE_CURRENT_BINARY_DIR}/fesom_version_info-generated.F90) +#list(REMOVE_ITEM sources_Fortran ${FESOM_ORIGINAL_VERSION_FILE}) # we want to compile the generated file instead +#list(APPEND sources_Fortran ${FESOM_GENERATED_VERSION_FILE}) +#add_custom_command(OUTPUT 5303B6F4_E4F4_45B2_A6E5_8E2B9FB5CDC4 
${FESOM_GENERATED_VERSION_FILE} # the first arg to OUTPUT is a name for a file we never create to make sure this command will run on every re-build (let our file be the second arg, as the first file is inadvertently removed by make) +# COMMAND ${CMAKE_COMMAND} -DFESOM_ORIGINAL_VERSION_FILE=${FESOM_ORIGINAL_VERSION_FILE} -DFESOM_GENERATED_VERSION_FILE=${FESOM_GENERATED_VERSION_FILE} -P GitRepositoryInfo.cmake +# WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} +# COMMENT "determining ${PROJECT_NAME} git SHA ...") + +#if(${FESOM_STANDALONE}) +# list(REMOVE_ITEM sources_Fortran ${src_home}/cpl_driver.F90) +#endif() +#list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90) +#list(REMOVE_ITEM sources_C ${src_home}/fort_part.c) + +# depends on the metis library +#add_subdirectory(../lib/metis-5.1.0 ${PROJECT_BINARY_DIR}/metis) +#include_directories(../lib/metis-5.1.0/include) +# depends on the parms library +#add_subdirectory(../lib/parms ${PROJECT_BINARY_DIR}/parms) + +#add_subdirectory(async_threads_cpp) + +#include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindNETCDF.cmake) + +#add_library(${PROJECT_NAME}_C ${sources_C}) +#target_compile_definitions(${PROJECT_NAME}_C PRIVATE PARMS USE_MPI REAL=double DBL HAS_BLAS FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 SGI LINUX UNDER_ MPI2) +#target_link_libraries(${PROJECT_NAME}_C parms) #metis + +# create our binary (set its name to name of this project) +add_executable(${PROJECT_NAME} ${sources_Fortran}) +#target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS -DMETIS_VERSION=5 -DPART_WEIGHTED -DMETISRANDOMSEED=35243) +#if(${DISABLE_MULTITHREADING}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE DISABLE_MULTITHREADING) +#endif() +#if(${FESOM_COUPLED}) +# include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindOASIS.cmake) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __oasis) +#endif() +#if(${OIFS_COUPLED}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __oifs) +#endif() +#if(${USE_ICEPACK}) +# 
target_compile_definitions(${PROJECT_NAME} PRIVATE __icepack) +#endif() +if(${VERBOSE}) + target_compile_definitions(${PROJECT_NAME} PRIVATE VERBOSE) +endif() + + +# CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) +if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) +# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) + target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) + target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + endif() +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) +endif() + + +#elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) +# target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) +#endif() +#target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) +#target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) +#target_include_directories(${PROJECT_NAME} PRIVATE ${SCRIP_Fortran_INCLUDE_DIRECTORIES}) 
+#target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRARIES} ${OASIS_Fortran_LIBRARIES}) +#target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) +#target_link_libraries(${PROJECT_NAME} async_threads_cpp) +#set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) + +set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") +get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) +get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) +install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) diff --git a/dwarf/dwarf_ice/dwarf_ini/fesom.F90 b/dwarf/dwarf_ice/dwarf_ini/fesom.F90 new file mode 100644 index 000000000..9dcc0d083 --- /dev/null +++ b/dwarf/dwarf_ice/dwarf_ini/fesom.F90 @@ -0,0 +1,104 @@ +!=============================================================================! +! +! Finite Volume Sea-ice Ocean Model +! +!=============================================================================! +! The main driving routine +!=============================================================================! 
+ +program main +USE MOD_MESH +USE MOD_PARTIT +USE MOD_ICE +USE MOD_PARSUP +USE g_comm_auto +USE par_support_interfaces +USE restart_derivedtype_module +use fortran_utils +IMPLICIT NONE + +character(LEN=500) :: resultpath, npepath +character(LEN=256) :: npes_string +logical :: dir_exist +type(t_mesh) , target, save :: mesh +type(t_partit), target, save :: partit +type(t_ice) , target, save :: ice +integer :: i, n, nzmax, nzmin +real(kind=WP) , allocatable :: UV(:,:,:), wvel(:,:), wvel_i(:,:), wvel_e(:,:) +integer :: node_size, elem_size + +!_______________________________________________________________________________ +resultpath="/work/ollie/pscholz/results_fesom2.0/test_binaryrestart" + +!_______________________________________________________________________________ +call MPI_INIT(i) +call par_init(partit) + +!_______________________________________________________________________________ +! check if resultpath exist +INQUIRE(directory=trim(resultpath), EXIST=dir_exist) +if (.not. dir_exist) then + if (partit%mype==0) print *, achar(27)//'[1;31m'//' -ERROR-> could not find:'//trim(resultpath)//achar(27)//'[0m' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) +end if + +npepath =trim(resultpath)//"/fesom_bin_restart/np"//int_to_txt(partit%npes) +INQUIRE(directory=trim(npepath), EXIST=dir_exist) +if (.not. dir_exist) then + if (partit%mype==0) print *, achar(27)//'[1;31m'//' -ERROR-> could not find:'//trim(npepath)//achar(27)//'[0m' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) +end if + +!_______________________________________________________________________________ +! read derived type binary restart files +call read_all_bin_restarts(npepath, ice=ice, partit=partit, mesh=mesh) + +!_______________________________________________________________________________ +! even though the partitioning has been read some things regarding MPI shall be computed during the runtime +! these include: MPI_TYPE_COMMIT etc. +! 
used to be call set_par_support(partit, mesh) +call init_mpi_types(partit, mesh) +call init_gatherLists(partit) + +!_______________________________________________________________________________ +node_size=partit%myDim_nod2D +partit%eDim_nod2D +elem_size=partit%myDim_elem2D+partit%eDim_elem2D + +!_______________________________________________________________________________ +do i=1, 10 + if (partit%mype==0) write(*,*) i + !___________________________________________________________________________ + ! Dynamics + select case (ice%whichEVP) + case (0) + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics...'//achar(27)//'[0m' + call EVPdynamics (ice, partit, mesh) + case (1) + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics_m...'//achar(27)//'[0m' + call EVPdynamics_m(ice, partit, mesh) + case (2) + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics_a...'//achar(27)//'[0m' + call EVPdynamics_a(ice, partit, mesh) + case default + if (partit%mype==0) write(*,*) 'a non existing EVP scheme specified!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end select + + !___________________________________________________________________________ + ! 
Advection + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_TG_rhs_div...'//achar(27)//'[0m' + call ice_TG_rhs_div (ice, partit, mesh) + + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_fct_solve...'//achar(27)//'[0m' + call ice_fct_solve (ice, partit, mesh) + + if (partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_update_for_div...'//achar(27)//'[0m' + call ice_update_for_div(ice, partit, mesh) + +end do + +call par_ex(partit%MPI_COMM_FESOM, partit%mype) + +end program main + diff --git a/dwarf/dwarf_ice/dwarf_linkfiles.sh b/dwarf/dwarf_ice/dwarf_linkfiles.sh new file mode 100755 index 000000000..7e01a16d7 --- /dev/null +++ b/dwarf/dwarf_ice/dwarf_linkfiles.sh @@ -0,0 +1,60 @@ +#!/bin/bash +#_______________________________________________________________________________ +# link environment variables and configure.sh files from the main repository level +ln -s ../../env env +ln -s ../../env.sh env.sh +ln -s ../../configure.sh configure.sh + +#_______________________________________________________________________________ +# create local source folder for the dwarf +if [ -d "src/" ] ; then rm -r src/ ; fi +mkdir src/ +cd src/ + +#_______________________________________________________________________________ +# link main dwarf files to the local src/ folder +ln -s ../dwarf_ini/fesom.F90 fesom.F90 +ln -s ../dwarf_ini/CMakeLists.txt CMakeLists.txt + +#_______________________________________________________________________________ +export which_path="../../../src/" + +# for downloading from specific github branch replace ln -s with wget +# export which_branch=refactoring +# export which_branch=refactoring_dwarf_ice +# export which_path=https://raw.githubusercontent.com/FESOM/fesom2/${which_branch}/src + +#_______________________________________________________________________________ +export which_files="associate_mesh_def.h + associate_mesh_ass.h + associate_part_def.h + associate_part_ass.h + MOD_MESH.F90 + MOD_PARTIT.F90 + 
MOD_TRACER.F90
+                   MOD_DYN.F90
+                   MOD_ICE.F90
+                   MOD_READ_BINARY_ARRAYS.F90
+                   MOD_WRITE_BINARY_ARRAYS.F90
+                   ice_modules.F90
+                   ice_EVP.F90
+                   ice_maEVP.F90
+                   ice_fct.F90
+                   gen_halo_exchange.F90
+                   gen_modules_partitioning.F90
+                   gen_modules_config.F90
+                   io_restart_derivedtype.F90
+                   fortran_utils.F90
+                   oce_modules.F90
+                   "
+#_______________________________________________________________________________
+# link the other necessary main src files to local src directory
+for file in ${which_files}; do
+    ln -s ${which_path}/${file} ${file}
+    # wget ${which_path}/${file}
+    # cp ${which_path}/${file} .
+done
+
+#_______________________________________________________________________________
+cd ../
+
diff --git a/dwarf/dwarf_ice/work/job_ollie b/dwarf/dwarf_ice/work/job_ollie
new file mode 100755
index 000000000..0fd30349d
--- /dev/null
+++ b/dwarf/dwarf_ice/work/job_ollie
@@ -0,0 +1,36 @@
+#!/bin/bash
+#SBATCH --job-name=fesom2.0
+#SBATCH -p mpp
+#SBATCH --ntasks=288
+#SBATCH --time=00:05:00
+#SBATCH -o fesom2.0_%x_%j.out
+#SBATCH -e fesom2.0_%x_%j.out
+module load intel.compiler intel.mpi netcdf/4.4.0_intel
+module load centoslibs
+
+set -x
+
+ulimit -s unlimited
+
+# determine JOBID
+###JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1`
+jobid=$(echo $SLURM_JOB_ID | cut -d"." -f1)
+
+ln -s ../bin/fesom.x . # cp -n ../bin/fesom.x
+cp -n ../config/namelist.config .
+cp -n ../config/namelist.forcing .
+cp -n ../config/namelist.oce .
+cp -n ../config/namelist.ice .
+cp -n ../config/namelist.io .
+cp -n ../config/namelist.icepack .
+
+date
+###srun --mpi=pmi2 ./fesom.x > "fesom2.0.out"
+srun --mpi=pmi2 ./fesom.x > "fesom2.0_${SLURM_JOB_NAME}_${jobid}.out"
+date
+
+#qstat -f $PBS_JOBID
+#export EXITSTATUS=$?
+#if [ ${EXITSTATUS} -eq 0 ] || [ ${EXITSTATUS} -eq 127 ] ; then
+#sbatch job_ollie
+#fi
diff --git a/dwarf/dwarf_tracer/CMakeLists.txt b/dwarf/dwarf_tracer/CMakeLists.txt
new file mode 100644
index 000000000..95b7e7b78
--- /dev/null
+++ b/dwarf/dwarf_tracer/CMakeLists.txt
@@ -0,0 +1,17 @@
+cmake_minimum_required(VERSION 3.4)
+
+# set default build type cache entry (do so before project(...) is called, which would create this cache entry on its own)
+if(NOT CMAKE_BUILD_TYPE)
+   message(STATUS "setting default build type: Release")
+   set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.")
+endif()
+
+project(FESOM2.0)
+set(TOPLEVEL_DIR ${CMAKE_CURRENT_LIST_DIR})
+set(FESOM_COUPLED OFF CACHE BOOL "compile fesom standalone or with oasis support (i.e. coupled)")
+set(OIFS_COUPLED OFF CACHE BOOL "compile fesom coupled to OpenIFS. (Also needs FESOM_COUPLED to work)")
+set(CRAY OFF CACHE BOOL "compile with cray ftn")
+set(USE_ICEPACK OFF CACHE BOOL "compile fesom with the Icepack modules for sea ice column physics.")
+#set(VERBOSE OFF CACHE BOOL "toggle debug output")
+#add_subdirectory(oasis3-mct/lib/psmile)
+add_subdirectory(src)
diff --git a/dwarf/dwarf_tracer/dwarf_ini/CMakeLists.txt b/dwarf/dwarf_tracer/dwarf_ini/CMakeLists.txt
new file mode 100644
index 000000000..c7cedc905
--- /dev/null
+++ b/dwarf/dwarf_tracer/dwarf_ini/CMakeLists.txt
@@ -0,0 +1,98 @@
+cmake_minimum_required(VERSION 3.4)
+
+project(fesom Fortran)
+
+option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF)
+
+# get our source files
+set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt
+#if(${USE_ICEPACK})
+# file(GLOB sources_Fortran ${src_home}/*.F90
+#                           ${src_home}/icepack_drivers/*.F90
+#                           ${src_home}/icepack_drivers/Icepack/columnphysics/*.F90)
+#else()
+file(GLOB sources_Fortran ${src_home}/*.F90)
+#endif()
+#list(REMOVE_ITEM sources_Fortran ${src_home}/fesom_partition_init.F90) +#file(GLOB sources_C ${src_home}/*.c) + +# generate a custom file from fesom_version_info.F90 which includes the current git SHA +#set(FESOM_ORIGINAL_VERSION_FILE ${src_home}/fesom_version_info.F90) +#set(FESOM_GENERATED_VERSION_FILE ${CMAKE_CURRENT_BINARY_DIR}/fesom_version_info-generated.F90) +#list(REMOVE_ITEM sources_Fortran ${FESOM_ORIGINAL_VERSION_FILE}) # we want to compile the generated file instead +#list(APPEND sources_Fortran ${FESOM_GENERATED_VERSION_FILE}) +#add_custom_command(OUTPUT 5303B6F4_E4F4_45B2_A6E5_8E2B9FB5CDC4 ${FESOM_GENERATED_VERSION_FILE} # the first arg to OUTPUT is a name for a file we never create to make sure this command will run on every re-build (let our file be the second arg, as the first file is inadvertently removed by make) +# COMMAND ${CMAKE_COMMAND} -DFESOM_ORIGINAL_VERSION_FILE=${FESOM_ORIGINAL_VERSION_FILE} -DFESOM_GENERATED_VERSION_FILE=${FESOM_GENERATED_VERSION_FILE} -P GitRepositoryInfo.cmake +# WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} +# COMMENT "determining ${PROJECT_NAME} git SHA ...") + +#if(${FESOM_STANDALONE}) +# list(REMOVE_ITEM sources_Fortran ${src_home}/cpl_driver.F90) +#endif() +#list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90) +#list(REMOVE_ITEM sources_C ${src_home}/fort_part.c) + +# depends on the metis library +#add_subdirectory(../lib/metis-5.1.0 ${PROJECT_BINARY_DIR}/metis) +#include_directories(../lib/metis-5.1.0/include) +# depends on the parms library +#add_subdirectory(../lib/parms ${PROJECT_BINARY_DIR}/parms) + +#add_subdirectory(async_threads_cpp) + +#include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindNETCDF.cmake) + +#add_library(${PROJECT_NAME}_C ${sources_C}) +#target_compile_definitions(${PROJECT_NAME}_C PRIVATE PARMS USE_MPI REAL=double DBL HAS_BLAS FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 SGI LINUX UNDER_ MPI2) +#target_link_libraries(${PROJECT_NAME}_C parms) #metis + +# create our binary (set its name to name 
of this project) +add_executable(${PROJECT_NAME} ${sources_Fortran}) +#target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS -DMETIS_VERSION=5 -DPART_WEIGHTED -DMETISRANDOMSEED=35243) +#if(${DISABLE_MULTITHREADING}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE DISABLE_MULTITHREADING) +#endif() +#if(${FESOM_COUPLED}) +# include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindOASIS.cmake) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __oasis) +#endif() +#if(${OIFS_COUPLED}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __oifs) +#endif() +#if(${USE_ICEPACK}) +# target_compile_definitions(${PROJECT_NAME} PRIVATE __icepack) +#endif() +if(${VERBOSE}) + target_compile_definitions(${PROJECT_NAME} PRIVATE VERBOSE) +endif() + + +# CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. mpif90 wraps ifort -> compiler id is Intel) +if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) +# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) + target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) + target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + endif() +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 
-hnoomp) +endif() + + +#elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) +# target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -hnoomp) +#endif() +#target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) +#target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) +#target_include_directories(${PROJECT_NAME} PRIVATE ${SCRIP_Fortran_INCLUDE_DIRECTORIES}) +#target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRARIES} ${OASIS_Fortran_LIBRARIES}) +#target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) +#target_link_libraries(${PROJECT_NAME} async_threads_cpp) +#set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) + +set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") +get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) +get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) +install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) diff --git a/dwarf/dwarf_tracer/dwarf_ini/fesom.F90 b/dwarf/dwarf_tracer/dwarf_ini/fesom.F90 new file mode 100755 index 000000000..d6467d29b --- /dev/null +++ b/dwarf/dwarf_tracer/dwarf_ini/fesom.F90 @@ -0,0 +1,89 @@ +!=============================================================================! +! +! Finite Volume Sea-ice Ocean Model +! +!=============================================================================! +! The main driving routine +!=============================================================================! 
+ +program main +USE MOD_MESH +USE MOD_PARTIT +USE MOD_TRACER +USE MOD_DYN +USE MOD_ICE +USE MOD_PARSUP +USE g_comm_auto +USE par_support_interfaces +USE restart_derivedtype_module +USE fortran_utils +IMPLICIT NONE + +character(LEN=500) :: resultpath, npepath +character(LEN=256) :: npes_string +logical :: dir_exist +logical :: L_EXISTS +type(t_mesh), target, save :: mesh +type(t_tracer), target, save :: tracers +type(t_partit), target, save :: partit +type(t_dyn), target, save :: dyn +type(t_ice), target, save :: ice +integer :: i, n, nzmax, nzmin + + +call MPI_INIT(i) +call par_init(partit) + +resultpath='/work/ollie/pscholz/results_fesom2.0/test_binaryrestart' + +! check if resultpath exist +INQUIRE(directory=trim(resultpath), EXIST=dir_exist) +if (.not. dir_exist) then + if (partit%mype==0) print *, achar(27)//'[1;31m'//' -ERROR-> could not find:'//trim(resultpath)//achar(27)//'[0m' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop +end if + +npepath =trim(resultpath)//"/fesom_bin_restart/np"//int_to_txt(partit%npes) +INQUIRE(directory=trim(npepath), EXIST=dir_exist) +if (.not. dir_exist) then + if (partit%mype==0) print *, achar(27)//'[1;31m'//' -ERROR-> could not find:'//trim(npepath)//achar(27)//'[0m' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop +end if + +!_______________________________________________________________________________ +! read derived type binary restart files +call read_all_bin_restarts(npepath, dynamics=dyn, tracers=tracers, partit=partit, mesh=mesh) + +! even though the partitioning has been read some things regarding MPI shall be computed during the runtime +! these include: MPI_TYPE_COMMIT etc. +! used to be call set_par_support(partit, mesh) +call init_mpi_types(partit, mesh) +call init_gatherLists(partit) + +do i=1, 10 + !___________________________________________________________________________ + ! ale tracer advection + tracers%work%del_ttf_advhoriz = 0.0_WP + tracers%work%del_ttf_advvert = 0.0_WP +! 
if (mype==0) write(*,*) 'start advection part.......' + call do_oce_adv_tra(1.e-3, dyn%uv, dyn%w, dyn%w_i, dyn%w_e, 1, dyn, tracers, partit, mesh) +! if (mype==0) write(*,*) 'advection part completed...' + if (partit%mype==0) write(*,*) minval(tracers%data(1)%values), maxval(tracers%data(1)%values), sum(tracers%data(1)%values) + !_____________________________________________________ + !___________________________________________________________________________ + ! update array for total tracer flux del_ttf with the fluxes from horizontal + ! and vertical advection + tracers%work%del_ttf=tracers%work%del_ttf+tracers%work%del_ttf_advhoriz+tracers%work%del_ttf_advvert + + do n=1, partit%myDim_nod2D + nzmax=mesh%nlevels_nod2D(n)-1 + nzmin=mesh%ulevels_nod2D(n) + tracers%data(1)%values(nzmin:nzmax,n)=tracers%data(1)%values(nzmin:nzmax,n)+tracers%work%del_ttf(nzmin:nzmax,n)/mesh%hnode_new(nzmin:nzmax,n) ! LINFS + end do + call exchange_nod(tracers%data(1)%values(:,:), partit) + call exchange_nod(tracers%data(2)%values(:,:), partit) +end do +call par_ex(partit%MPI_COMM_FESOM, partit%mype) +end program main diff --git a/dwarf/dwarf_tracer/dwarf_linkfiles.sh b/dwarf/dwarf_tracer/dwarf_linkfiles.sh new file mode 100755 index 000000000..a9b254fb4 --- /dev/null +++ b/dwarf/dwarf_tracer/dwarf_linkfiles.sh @@ -0,0 +1,53 @@ +#!/bin/bash +#_______________________________________________________________________________ +ln -s ../../env env +ln -s ../../env.sh env.sh +ln -s ../../configure.sh configure.sh +#_______________________________________________________________________________ +if [ -d "src/" ] ; then rm -r src/ ; fi +# fi +mkdir src/ +cd src/ + +#_______________________________________________________________________________ +ln -s ../dwarf_ini/fesom.F90 fesom.F90 +ln -s ../dwarf_ini/CMakeLists.txt CMakeLists.txt + +#_______________________________________________________________________________ +export which_path="../../../src/" + +# export which_branch=refactoring +# 
export which_branch=refactoring_dwarf_ice +# export which_path=https://raw.githubusercontent.com/FESOM/fesom2/${which_branch}/src +#_______________________________________________________________________________ +export which_files="associate_mesh_def.h + associate_mesh_ass.h + associate_part_def.h + associate_part_ass.h + MOD_MESH.F90 + MOD_PARTIT.F90 + MOD_TRACER.F90 + MOD_DYN.F90 + MOD_ICE.F90 + MOD_READ_BINARY_ARRAYS.F90 + MOD_WRITE_BINARY_ARRAYS.F90 + io_restart_derivedtype.F90 + fortran_utils.F90 + gen_halo_exchange.F90 + oce_adv_tra_driver.F90 + oce_adv_tra_hor.F90 + oce_modules.F90 + gen_modules_partitioning.F90 + oce_adv_tra_fct.F90 + oce_adv_tra_ver.F90 + " +#_______________________________________________________________________________ +for file in ${which_files}; do + ln -s ${which_path}/${file} ${file} + # wget ${which_path}/${file} + # cp ${which_path}/${file} . +done + +#_______________________________________________________________________________ +cd ../ + diff --git a/dwarf/dwarf_tracer/work/job_ollie b/dwarf/dwarf_tracer/work/job_ollie new file mode 100755 index 000000000..df38d528d --- /dev/null +++ b/dwarf/dwarf_tracer/work/job_ollie @@ -0,0 +1,27 @@ +#!/bin/bash +#SBATCH --job-name=fesom2.0 +#SBATCH -p mpp +#SBATCH --ntasks=72 +#SBATCH --time=00:30:00 +#SBATCH -o slurm-out.out +#SBATCH -e slurm-err.out +module load intel.compiler intel.mpi netcdf/4.4.0_intel +module load centoslibs + +set -x + +ulimit -s unlimited + +# determine JOBID +JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1` + +ln -s ../bin/fesom.x . # cp -n ../bin/fesom.x +date +srun --mpi=pmi2 ./fesom.x > "fesom2.0.out" +date + +#qstat -f $PBS_JOBID +#export EXITSTATUS=$? 
+#if [ ${EXITSTATUS} -eq 0 ] || [ ${EXITSTATUS} -eq 127 ] ; then +#sbatch job_ollie +#fi diff --git a/env.sh b/env.sh index f568651e3..52868001c 100755 --- a/env.sh +++ b/env.sh @@ -27,6 +27,8 @@ fi if [[ $LOGINHOST =~ ^m[A-Za-z0-9]+\.hpc\.dkrz\.de$ ]]; then STRATEGY="mistral.dkrz.de" +elif [[ $LOGINHOST =~ ^l[A-Za-z0-9]+\.atos\.local$ ]]; then + STRATEGY="levante.dkrz.de" elif [[ $LOGINHOST =~ ^ollie[0-9]$ ]] || [[ $LOGINHOST =~ ^prod-[0-9]{4}$ ]]; then STRATEGY="ollie" elif [[ $LOGINHOST =~ ^h[A-Za-z0-9]+\.hsn\.hlrn\.de$ ]]; then @@ -47,6 +49,12 @@ elif [[ $LOGINHOST =~ ^jwlogin[0-9][0-9].juwels$ ]]; then STRATEGY="juwels" elif [[ $LOGINHOST =~ ^cc[a-b]+-login[0-9]+\.ecmwf\.int$ ]]; then STRATEGY="ecaccess.ecmwf.int" +elif [[ $LOGINHOST =~ ^stco-esl[0-9]+$ ]]; then + STRATEGY="aleph" +elif [[ $LOGINHOST =~ ^[A-Za-z0-9]+\.ecmwf\.int$ ]]; then +STRATEGY="wsecmwf" +elif [[ $LOGINHOST =~ \.bullx$ ]]; then +STRATEGY="atosecmwf" else echo "can not determine environment for host: "$LOGINHOST [ $BEING_EXECUTED = true ] && exit 1 @@ -68,5 +76,6 @@ if [ $BEING_EXECUTED = true ]; then echo $DIR/env/$STRATEGY else # file is being sourced + export FESOM_PLATFORM_STRATEGY=$STRATEGY source $DIR/env/$STRATEGY/shell fi diff --git a/env/aleph/shell b/env/aleph/shell new file mode 100644 index 000000000..874bdb086 --- /dev/null +++ b/env/aleph/shell @@ -0,0 +1,35 @@ +module unload craype +module load craype/2.6.2 + +module load PrgEnv-cray/6.0.4 +module load alps pbs +module load cray-mpich/7.7.3 +module load craype-x86-skylake +module load cmake/3.14.0 +module load cray-hdf5-parallel/1.10.2.0 +module load cray-netcdf-hdf5parallel/4.6.1.3 +module load fftw/2.1.5.9 +module load proj4/5.1.0 + +export HDF5ROOT=$HDF5_ROOT +export NETCDFFROOT=$NETCDF_DIR +export NETCDFROOT=$NETCDF_DIR +export NETCDF_Fortran_INCLUDE_DIRECTORIES=$NETCDFROOT/include +export NETCDF_CXX_INCLUDE_DIRECTORIES=$NETCDFROOT/include +export NETCDF_CXX_LIBRARIES=$NETCDFROOT/lib +export PERL5LIB=/usr/lib64/perl5 
+export XML2ROOT=/usr +export ZLIBROOT=/usr +export TMPDIR=/tmp +export PMI_LABEL_ERROUT=1 +export DR_HOOK_IGNORE_SIGNALS=-1 + + +# enable full MPI thread support level (MPI_THREAD_MULTIPLE) +export MPICH_MAX_THREAD_SAFETY=multiple # to also switch to an alternative (probably with faster locking) multi threading implementation of the cray-mpich library, use the compiler flag -craympich-mt +export MPICH_CRAY_OPT_THREAD_SYNC=0 # the Cray MPICH library falls back to using the pthread_mutex-based thread-synchronization implementation +export MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC (but this one brings a huge speed gain on aleph) + +export ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS='' + +export FC=ftn CC=cc CXX=CC diff --git a/env/atosecmwf/shell b/env/atosecmwf/shell new file mode 100644 index 000000000..3c5efc9c9 --- /dev/null +++ b/env/atosecmwf/shell @@ -0,0 +1,39 @@ +# used at ECMWF + +module unload metview +module unload emos +module unload eccodes +module unload fftw +module unload openmpi +module unload boost +module unload fcm +module unload hdf5 +module unload netcdf +module unload netcdf4 +module unload python3 +module unload nag +module unload gnu +module unload clang +module unload intel +module unload cmake +module unload prgenv +module unload gcc + +# Load modules +module load prgenv/intel +module load intel/2021.2.0 +module load hpcx-openmpi/2.9.0 +module load intel-mkl/19.0.5 +module load fftw/3.3.9 +module load netcdf4/4.7.4 +module load hdf5/1.10.6 +module load boost/1.71.0 +module load eigen/3.3.7 +module load cmake/3.20.2 +module load ninja/1.10.0 +module load fcm/2019.05.0 + +export NETCDF_DIR=$NETCDF4_DIR + +export FC=mpif90 CC=mpicc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly +#export FC=mpif90 CC=gcc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly diff --git a/env/ecaccess.ecmwf.int/shell b/env/ecaccess.ecmwf.int/shell index 
743116a12..6a28b2f1c 100644 --- a/env/ecaccess.ecmwf.int/shell +++ b/env/ecaccess.ecmwf.int/shell @@ -1,11 +1,46 @@ -export PATH=/home/rd/natr/cmake-3.11.2-Linux-x86_64/bin:$PATH - +module unload grib_api +module unload eccodes +module unload emos +module unload cmake +module unload fftw +module unload fcm +module unload netcdf4 +module unload netcdf4-parallel +module unload hdf5-parallel +module unload cray-netcdf-hdf5parallel +module unload cray-hdf5-parallel +module unload cray-netcdf module unload cray-hdf5 -module load cray-netcdf -module load cray-hdf5 +module unload python +module unload python3 +module unload boost +module unload ecbuild +module unload ifs-support +module unload fcm +module unload cdt +module unload cmake +module unload gcc + +export EC_CRAYPE_INTEGRATION=off + +# Load modules +module load cdt/18.12 +module load gcc/6.3.0 +module load fftw/3.3.4.5 +module load netcdf4-parallel/4.6.2 +module load hdf5-parallel/1.10.4 +module load fcm/2015.02.0 +module load python/2.7.12-01 +module load python3/3.6.8-01 +module load boost/1.61.0 +module load eigen/3.2.0 +module load nag +module load parmetis +module load cray-snplauncher +module load atp +module load ninja +module load cmake/3.15.0 #export CRAYPE_LINK_TYPE=dynamic -# enable full MPI thread support level (MPI_THREAD_MULTIPLE) -export MPICH_MAX_THREAD_SAFETY=multiple # to also switch to an alternative (probably with faster locking) multi threading implementation of the cray-mpich library, use the compiler flag -craympich-mt export FC=ftn CC=cc CXX=CC diff --git a/env/juwels/shell b/env/juwels/shell index 0b5451c82..7217017ff 100644 --- a/env/juwels/shell +++ b/env/juwels/shell @@ -1,22 +1,44 @@ ########## -module --force purge -module use /gpfs/software/juwels/otherstages -module load Stages/2019a -module load StdEnv -# For intel MPI -#module load Intel/2019.3.199-GCC-8.3.0 IntelMPI/2018.5.288 imkl/2019.3.199 -#export FC=mpiifort CC=mpiicc CXX=mpiicpc +module --force purge +module use 
/gpfs/software/juwels/otherstages +module load Stages/2020 +module load Intel/2020.2.254-GCC-9.3.0 +module load ParaStationMPI/5.4.7-1 +module load CMake/3.18.0 +module load imkl/2020.2.254 +module load netCDF-Fortran/4.5.3 +module load netCDF/4.7.4 +module load Perl/5.32.0 +module load netCDF -# For ParaStation MPI -module load Intel/2019.3.199-GCC-8.3.0 ParaStationMPI/5.4 imkl/2019.5.281 -export FC=mpifort CC=mpicc CXX=mpicxx +export LC_ALL=en_US.UTF-8 +export TMPDIR=/tmp +export FC=mpifort +export F77=mpifort +export MPIFC=mpifort +export FCFLAGS=-free +export CC=mpicc +export CXX=mpic++ -module load netCDF/4.6.3 -module load netCDF-Fortran/4.4.5 -module load CMake -export NETCDF_DIR=$EBROOTNETCDF -export NETCDFF_DIR=$EBROOTNETCDFMINFORTRAN -export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ -export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ -export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ +export NETCDF_Fortran_INCLUDE_DIRECTORIES=$EBROOTNETCDFMINFORTRAN/include +export NETCDF_Fortran_LIBRARIES=$EBROOTNETCDFMINFORTRAN/lib +#module use /gpfs/software/juwels/otherstages +#module load Stages/2019a +#module load StdEnv +## For intel MPI +##module load Intel/2019.3.199-GCC-8.3.0 IntelMPI/2018.5.288 imkl/2019.3.199 +##export FC=mpiifort CC=mpiicc CXX=mpiicpc + +## For ParaStation MPI +#module load Intel/2019.3.199-GCC-8.3.0 ParaStationMPI/5.4 imkl/2019.5.281 +#export FC=mpifort CC=mpicc CXX=mpicxx + +#module load netCDF/4.6.3 +#module load netCDF-Fortran/4.4.5 +#module load CMake +#export NETCDF_DIR=$EBROOTNETCDF +#export NETCDFF_DIR=$EBROOTNETCDFMINFORTRAN +#export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ +#export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ +#export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ diff --git a/env/juwels/shell_2020+intel+cluster b/env/juwels/shell_2020+intel+cluster new file mode 100644 index 000000000..7217017ff --- /dev/null +++ 
b/env/juwels/shell_2020+intel+cluster @@ -0,0 +1,44 @@ +########## +module --force purge +module use /gpfs/software/juwels/otherstages +module load Stages/2020 +module load Intel/2020.2.254-GCC-9.3.0 +module load ParaStationMPI/5.4.7-1 +module load CMake/3.18.0 +module load imkl/2020.2.254 +module load netCDF-Fortran/4.5.3 +module load netCDF/4.7.4 +module load Perl/5.32.0 +module load netCDF + +export LC_ALL=en_US.UTF-8 +export TMPDIR=/tmp +export FC=mpifort +export F77=mpifort +export MPIFC=mpifort +export FCFLAGS=-free +export CC=mpicc +export CXX=mpic++ + +export NETCDF_Fortran_INCLUDE_DIRECTORIES=$EBROOTNETCDFMINFORTRAN/include +export NETCDF_Fortran_LIBRARIES=$EBROOTNETCDFMINFORTRAN/lib + +#module use /gpfs/software/juwels/otherstages +#module load Stages/2019a +#module load StdEnv +## For intel MPI +##module load Intel/2019.3.199-GCC-8.3.0 IntelMPI/2018.5.288 imkl/2019.3.199 +##export FC=mpiifort CC=mpiicc CXX=mpiicpc + +## For ParaStation MPI +#module load Intel/2019.3.199-GCC-8.3.0 ParaStationMPI/5.4 imkl/2019.5.281 +#export FC=mpifort CC=mpicc CXX=mpicxx + +#module load netCDF/4.6.3 +#module load netCDF-Fortran/4.4.5 +#module load CMake +#export NETCDF_DIR=$EBROOTNETCDF +#export NETCDFF_DIR=$EBROOTNETCDFMINFORTRAN +#export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ +#export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ +#export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ diff --git a/env/juwels/shell_2022+intel b/env/juwels/shell_2022+intel new file mode 100644 index 000000000..3ffbc7970 --- /dev/null +++ b/env/juwels/shell_2022+intel @@ -0,0 +1,21 @@ +########## +module --force purge +module use $OTHERSTAGES +module load Stages/2022 +module load Intel/2021.4.0 +module load ParaStationMPI/5.5.0-1 +module load CMake/3.21.1 +module load imkl/2021.4.0 +module load netCDF-Fortran/4.5.3 +module load netCDF/4.8.1 +module load Perl/5.34.0 + +export LC_ALL=en_US.UTF-8 +export TMPDIR=/tmp +export FC=mpifort +export F77=mpifort 
+export MPIFC=mpifort +export CC=mpicc +export CXX=mpic++ + +export NETCDF_Fortran_INCLUDE_DIRECTORIES=$EBROOTNETCDFMINFORTRAN/include diff --git a/env/juwels/shell_2022+intel+customnetcdf b/env/juwels/shell_2022+intel+customnetcdf new file mode 100644 index 000000000..c49641b3b --- /dev/null +++ b/env/juwels/shell_2022+intel+customnetcdf @@ -0,0 +1,22 @@ +########## +module --force purge +module use $OTHERSTAGES +module load Stages/2022 +module load Intel/2021.4.0 +module load ParaStationMPI/5.5.0-1 +module load CMake/3.21.1 +module load imkl/2021.4.0 + +export LC_ALL=en_US.UTF-8 +export TMPDIR=/tmp +export FC=mpifort +export F77=mpifort +export MPIFC=mpifort +export CC=mpicc +export CXX=mpic++ + +export IO_LIB_ROOT=/p/project/pra127/rackow1/RAPS20_fesom/flexbuild/external/intel.juwels/install +export LD_LIBRARY_PATH=${IO_LIB_ROOT}/lib:$LD_LIBRARY_PATH +export NETCDF_Fortran_INCLUDE_DIRECTORIES=${IO_LIB_ROOT}/include +export NETCDF_C_INCLUDE_DIRECTORIES=${IO_LIB_ROOT}/include +export NETCDF_CXX_INCLUDE_DIRECTORIES=${IO_LIB_ROOT}/include diff --git a/env/juwels/shell_2022+nvfortran b/env/juwels/shell_2022+nvfortran new file mode 100644 index 000000000..98e8a6db7 --- /dev/null +++ b/env/juwels/shell_2022+nvfortran @@ -0,0 +1,44 @@ +########## +module --force purge +module use /gpfs/software/juwels/otherstages +module load Stages/2022 +module load NVHPC/22.1 # older versions of pgf90/nvfortran can not compile mixed assumed shape and assumed rank parameters +module load ParaStationMPI/5.5.0-1 +module load CMake/3.21.1 +#module load imkl/2021.2.0 +module load netCDF-Fortran/4.5.3 +module load netCDF/4.8.1 +module load Perl/5.34.0 + +export LC_ALL=en_US.UTF-8 +export TMPDIR=/tmp +export FC=mpifort +export F77=mpifort +export MPIFC=mpifort +#export FCFLAGS=-free +export CC=mpicc +export CXX=mpic++ + +export NETCDF_Fortran_INCLUDE_DIRECTORIES=$EBROOTNETCDFMINFORTRAN/include +export NETCDF_Fortran_LIBRARIES=$EBROOTNETCDFMINFORTRAN/lib + +#module use 
/gpfs/software/juwels/otherstages +#module load Stages/2019a +#module load StdEnv +## For intel MPI +##module load Intel/2019.3.199-GCC-8.3.0 IntelMPI/2018.5.288 imkl/2019.3.199 +##export FC=mpiifort CC=mpiicc CXX=mpiicpc + +## For ParaStation MPI +#module load Intel/2019.3.199-GCC-8.3.0 ParaStationMPI/5.4 imkl/2019.5.281 +#export FC=mpifort CC=mpicc CXX=mpicxx + +#module load netCDF/4.6.3 +#module load netCDF-Fortran/4.4.5 +#module load CMake +#export NETCDF_DIR=$EBROOTNETCDF +#export NETCDFF_DIR=$EBROOTNETCDFMINFORTRAN +#export NETCDF_Fortran_INCLUDE_DIRECTORIES=${NETCDFF_DIR}/include/ +#export NETCDF_C_INCLUDE_DIRECTORIES=${NETCDF_DIR}/include/ +#export NETCDF_CXX_INCLUDE_DIRECTORIES=${NETCDFCXX_DIR}/include/ + diff --git a/env/levante.dkrz.de/shell b/env/levante.dkrz.de/shell new file mode 100755 index 000000000..c303251dc --- /dev/null +++ b/env/levante.dkrz.de/shell @@ -0,0 +1,33 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others +export LC_ALL=en_US.UTF-8 +export CPU_MODEL=AMD_EPYC_ZEN3 + +module load intel-oneapi-compilers/2022.0.1-gcc-11.2.0 +module load intel-oneapi-mkl/2022.0.1-gcc-11.2.0 +module load openmpi/4.1.2-intel-2021.5.0 +export FC=mpif90 CC=mpicc CXX=mpicxx ; +export LD_LIBRARY_PATH=/sw/spack-levante/intel-oneapi-mkl-2022.0.1-ttdktf/mkl/2022.0.1/lib/intel64:$LD_LIBRARY_PATH + +module load netcdf-c/4.8.1-openmpi-4.1.2-intel-2021.5.0 +module load netcdf-fortran/4.5.3-openmpi-4.1.2-intel-2021.5.0 +module load git # to be able to determine the fesom git SHA when compiling + +ulimit -s unlimited # without setting the stack size we get a segfault from the levante netcdf library at runtime +ulimit -c 0 # do not create a coredump after a crash + +# environment for Open MPI 4.0.0 and later from https://docs.dkrz.de/doc/levante/running-jobs/runtime-settings.html +export OMPI_MCA_pml="ucx" +export OMPI_MCA_btl=self +export OMPI_MCA_osc="pt2pt" +export UCX_IB_ADDR_TYPE=ib_global +# for most runs one may 
or may not want to disable HCOLL +export OMPI_MCA_coll="^ml,hcoll" +export OMPI_MCA_coll_hcoll_enable="0" +export HCOLL_ENABLE_MCAST_ALL="0" +export HCOLL_MAIN_IB=mlx5_0:1 +export UCX_NET_DEVICES=mlx5_0:1 +export UCX_TLS=mm,knem,cma,dc_mlx5,dc_x,self +export UCX_UNIFIED_MODE=y +export HDF5_USE_FILE_LOCKING=FALSE +export OMPI_MCA_io="romio321" +export UCX_HANDLE_ERRORS=bt diff --git a/env/mistral.dkrz.de/shell-intel+openmpi b/env/mistral.dkrz.de/shell-intel+openmpi new file mode 100644 index 000000000..d4227b601 --- /dev/null +++ b/env/mistral.dkrz.de/shell-intel+openmpi @@ -0,0 +1,50 @@ +# make the contents as shell agnostic as possible so we can include them with bash, zsh and others + +module load gcc/4.8.2 +export LD_LIBRARY_PATH=/sw/rhel6-x64/gcc/gcc-4.8.2/lib64:$LD_LIBRARY_PATH # avoid GLIBCXX_3.4.15 not found error +module unload intel && module load intel/18.0.4 + +export FC=mpif90 CC=mpicc CXX=mpicxx; module unload intelmpi; module load openmpi/2.0.2p2_hpcx-intel14 + +# from https://www.dkrz.de/up/systems/mistral/running-jobs/mpi-runtime-settings +export I_MPI_FABRICS=shm:dapl +export I_MPI_FALLBACK=disable +export I_MPI_SLURM_EXT=0 # disable optimized startup algorithm for intel MPI +export I_MPI_LARGE_SCALE_THRESHOLD=8192 # set to a value larger than the number of MPI-tasks used !!! 
+export I_MPI_DYNAMIC_CONNECTION=1 +export I_MPI_CHECK_DAPL_PROVIDER_COMPATIBILITY=0 +export I_MPI_HARD_FINALIZE=1 +export I_MPI_ADJUST_GATHER=1 # do not use =3 (Shumilin's algorithm) + +export MXM_LOG_LEVEL=ERROR # try to disable "Conflicting CPU frequencies detected" messages from OpenMPI + +module unload netcdf && module load netcdf_c/4.3.2-gcc48 +module unload cmake && module load cmake +# we will get a segfault at runtime if we use a gcc from any of the provided gcc modules +export PATH=/sw/rhel6-x64/gcc/binutils-2.24-gccsys/bin:${PATH} + +export NETCDF_Fortran_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_fortran-4.4.2-intel14/include +export NETCDF_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_c-4.3.2-intel14/include +export NETCDF_CXX_INCLUDE_DIRECTORIES=/sw/rhel6-x64/netcdf/netcdf_cxx-4.2.1-gcc48/include + +export HDF5_C_INCLUDE_DIRECTORIES=/sw/rhel6-x64/hdf5/hdf5-1.8.14-threadsafe-intel14/include + + + +### not sure if the following settings are really necessary to run the coupled awicm3 + +export GRIBROOT=/pf/a/a270092/ecmwf/grib_api_intel_modulegcc +export NETCDFFROOT=/sw/rhel6-x64/netcdf/netcdf_fortran-4.4.3-intel14 +export NETCDFROOT=/sw/rhel6-x64/netcdf/netcdf_c-4.4.0-gcc48 +export HDF5ROOT=/sw/rhel6-x64/hdf5/hdf5-1.8.14-threadsafe-gcc48 +export SZIPROOT=/sw/rhel6-x64/sys/libaec-0.3.2-gcc48 +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${NETCDFFROOT}/lib:${HDF5ROOT}/lib:${NETCDFROOT}/lib:${SZIPROOT}/lib:${GRIBROOT}/lib + +export FESOM_USE_CPLNG='active' + +export DR_HOOK=1 +export DR_HOOK_IGNORE_SIGNALS='-1' +export DR_HOOK_OPT=prof +export DR_HOOK_PROFILE_LIMIT=0.5 +export OIFS_DUMMY_ACTION=ABORT +export HDF5_DISABLE_VERSION_CHECK=1 diff --git a/env/wsecmwf/shell b/env/wsecmwf/shell new file mode 100644 index 000000000..ddcd1c13f --- /dev/null +++ b/env/wsecmwf/shell @@ -0,0 +1,13 @@ +# used at ECMWF + +module unload openmpi +module unload eccodes +module unload netcdf4 +module unload hdf5 +module switch gnu/7.3.0 +module load netcdf4/4.4.1 +module 
load hdf5/1.8.17 +module load openmpi/2.1.3 + +export FC=mpif90 CC=mpicc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly +#export FC=mpif90 CC=gcc CXX=mpicxx # MPI wrappers for Fortran, cc and CC similarly diff --git a/lib/metis-5.1.0/GKlib/GKlibSystem.cmake b/lib/metis-5.1.0/GKlib/GKlibSystem.cmake index 3fcc29108..6993c7c69 100644 --- a/lib/metis-5.1.0/GKlib/GKlibSystem.cmake +++ b/lib/metis-5.1.0/GKlib/GKlibSystem.cmake @@ -37,6 +37,8 @@ if(CMAKE_COMPILER_IS_GNUCC) elseif(${CMAKE_C_COMPILER_ID} MATCHES "Sun") # Sun insists on -xc99. set(GKlib_COPTIONS "${GKlib_COPTIONS} -xc99") +elseif(${CMAKE_C_COMPILER_ID} MATCHES "Cray") + set(GKlib_COPTIONS "${GKlib_COPTIONS} -fPIC") endif(CMAKE_COMPILER_IS_GNUCC) # Find OpenMP if it is requested. diff --git a/lib/parms/CMakeLists.txt b/lib/parms/CMakeLists.txt index 6959573f5..3cb7b48cf 100644 --- a/lib/parms/CMakeLists.txt +++ b/lib/parms/CMakeLists.txt @@ -9,7 +9,11 @@ file(GLOB all_sources ${src_home}/src/*.c ${src_home}/src/DDPQ/*.c) include("${CMAKE_CURRENT_LIST_DIR}/../../cmake/FindBLAS.cmake") # create our library (set its name to name of this project) -add_library(${PROJECT_NAME} ${all_sources}) +if(${BUILD_FESOM_AS_LIBRARY}) + add_library(${PROJECT_NAME} ${all_sources}) +else() + add_library(${PROJECT_NAME} ${all_sources}) +endif() target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS USE_MPI REAL=double DBL FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 HAS_BLAS) target_include_directories(${PROJECT_NAME} PRIVATE ${src_home}/src/../include ${src_home}/src/include @@ -18,4 +22,11 @@ target_include_directories(${PROJECT_NAME} target_link_libraries(${PROJECT_NAME} INTERFACE ${BLAS_C_LIBRARIES} $ENV{UBUNTU_BLAS_LIBRARY}) if(${CMAKE_C_COMPILER_ID} STREQUAL "Intel") target_compile_options(${PROJECT_NAME} PRIVATE -no-prec-div -no-prec-sqrt -fast-transcendentals -fp-model precise) + if(${FESOM_PLATFORM_STRATEGY} STREQUAL levante.dkrz.de ) + target_compile_options(${PROJECT_NAME} PRIVATE -march=core-avx2 
-mtune=core-avx2) + endif() +endif() +if(${BUILD_FESOM_AS_LIBRARY}) + target_compile_options(${PROJECT_NAME} PRIVATE -fPIC) + install(TARGETS ${PROJECT_NAME} DESTINATION "${FESOM_INSTALL_PREFIX}/lib") endif() diff --git a/lib/parms/src/DDPQ/ilutpC.c b/lib/parms/src/DDPQ/ilutpC.c index a384f6a89..d158168d8 100755 --- a/lib/parms/src/DDPQ/ilutpC.c +++ b/lib/parms/src/DDPQ/ilutpC.c @@ -298,7 +298,7 @@ msg_timer_start(&t27); len = lenl > fil5 ? fil5 : lenl; ilusch->L->nnzrow[ii] = len; if (lenl > len) - qsplitC(w, jw, lenl, len); + qsplitCF(w, jw, lenl, len); /* printf(" row %d length of L = %d",ii,len); */ @@ -328,7 +328,7 @@ msg_timer_start(&t28); len = lenu > fil6 ? fil6 : lenu; ilusch->U->nnzrow[ii] = len; if (lenu > len+1) - qsplitC(&w[ii+1], &jw[ii+1], lenu-1, len); + qsplitCF(&w[ii+1], &jw[ii+1], lenu-1, len); ilusch->U->pa[ii] = (FLOAT *) Malloc(len*sizeof(FLOAT), "ilutpC:7" ); ilusch->U->pj[ii] = (int *) Malloc(len*sizeof(int), "ilutpC:8" ); /*--------------------------------------------------------------------- @@ -686,7 +686,7 @@ int ilutD(csptr amat, double *droptol, int *lfil, ilutptr ilusch) lenl = len > fil5 ? 
fil5 : len; ilusch->L->nnzrow[ii] = lenl; if (len > lenl) - qsplitC(w, jw, len, lenl); + qsplitCF(w, jw, len, lenl); if (len > 0) { ilusch->L->pj[ii] = (int *) Malloc(lenl*sizeof(int), "ilutD:4" ); ilusch->L->pa[ii] = (FLOAT *) Malloc(lenl*sizeof(FLOAT), "ilutD:5"); @@ -711,7 +711,7 @@ int ilutD(csptr amat, double *droptol, int *lfil, ilutptr ilusch) ilusch->U->nnzrow[ii] = lenu; jpos = lenu-1; if (len > jpos) - qsplitC(w, jw, len, jpos); + qsplitCF(w, jw, len, jpos); ilusch->U->pa[ii] = (FLOAT *) Malloc(lenu*sizeof(FLOAT), "ilutD:6" ); ilusch->U->pj[ii] = (int *) Malloc(lenu*sizeof(int), "ilutD:7" ); if(ABS_VALUE(t) <= DBL_EPSILON) t= tnorm; diff --git a/lib/parms/src/DDPQ/misc.c b/lib/parms/src/DDPQ/misc.c index 1713c8e00..955af1f98 100755 --- a/lib/parms/src/DDPQ/misc.c +++ b/lib/parms/src/DDPQ/misc.c @@ -10,55 +10,6 @@ #define DBL_EPSILON 2.2204460492503131e-16 // double epsilon -int qsplitC(FLOAT *a, int *ind, int n, int ncut) -{ -/*---------------------------------------------------------------------- -| does a quick-sort split of a complex real array. -| on input a[0 : (n-1)] is a real array -| on output is permuted such that its elements satisfy: -| -| abs(a[i]) >= abs(a[ncut-1]) for i < ncut-1 and -| abs(a[i]) <= abs(a[ncut-1]) for i > ncut-1 -| -| ind[0 : (n-1)] is an integer array permuted in the same way as a. 
-|---------------------------------------------------------------------*/ - FLOAT tmp; - double abskey; - int j, itmp, first, mid, last; - first = 0; - last = n-1; - if (ncutlast) return 0; -/* outer loop -- while mid != ncut */ -label1: - mid = first; - abskey = ABS_VALUE(a[mid]); - for (j=first+1; j<=last; j++) { - if (ABS_VALUE(a[j]) > abskey) { - tmp = a[++mid]; - itmp = ind[mid]; - a[mid] = a[j]; - ind[mid] = ind[j]; - a[j] = tmp; - ind[j] = itmp; - } - } -/*-------------------- interchange */ - tmp = a[mid]; - a[mid] = a[first]; - a[first] = tmp; - itmp = ind[mid]; - ind[mid] = ind[first]; - ind[first] = itmp; -/*-------------------- test for while loop */ - if (mid == ncut) return 0; - if (mid > ncut) - last = mid-1; - else - first = mid+1; - goto label1; -} -/*--------------- end of zqsplitC ----------------------------------------*/ - int SparTran(csptr amat, csptr bmat, int job, int flag) { /*---------------------------------------------------------------------- diff --git a/lib/parms/src/DDPQ/piluNEW.c b/lib/parms/src/DDPQ/piluNEW.c index f104927fd..36f219a58 100755 --- a/lib/parms/src/DDPQ/piluNEW.c +++ b/lib/parms/src/DDPQ/piluNEW.c @@ -338,7 +338,7 @@ int pilu(p4ptr amat, csptr B, csptr C, double *droptol, lenl = len > fil0 ? 
fil0 : len; amat->L->nnzrow[ii] = lenl; if (lenl < len) - qsplitC(w, jw, len, lenl); + qsplitCF(w, jw, len, lenl); if (len > 0) { amat->L->pj[ii] = (int *) Malloc(lenl*sizeof(int), "pilu:10" ); amat->L->pa[ii] = (FLOAT *) Malloc(lenl*sizeof(FLOAT), "pilu:11" ); @@ -363,7 +363,7 @@ int pilu(p4ptr amat, csptr B, csptr C, double *droptol, amat->U->nnzrow[ii] = lenu; jpos = lenu-1; if (jpos < len) - qsplitC(w, jw, len, jpos); + qsplitCF(w, jw, len, jpos); amat->U->pa[ii] = (FLOAT *) Malloc(lenu*sizeof(FLOAT), "pilu:12" ); amat->U->pj[ii] = (int *) Malloc(lenu*sizeof(int), "pilu:13" ); if(ABS_VALUE(t) <= DBL_EPSILON) t = rnorm; //(0.0001+drop1); @@ -387,7 +387,7 @@ int pilu(p4ptr amat, csptr B, csptr C, double *droptol, } lenu = len > fil2 ? fil2 : len; if (lenu < len) - qsplitC(w, jw, len, lenu); + qsplitCF(w, jw, len, lenu); lflen[ii] = lenu; if (lenu > 0) { @@ -586,7 +586,7 @@ int pilu(p4ptr amat, csptr B, csptr C, double *droptol, schur->nnzrow[ii] = lenu; jpos = lenu; if (jpos < len) - qsplitC(w, jw, len, jpos); + qsplitCF(w, jw, len, jpos); schur->pa[ii] = (FLOAT *) Malloc(lenu*sizeof(FLOAT), "pilu:16" ); schur->pj[ii] = (int *) Malloc(lenu*sizeof(int), "pilu:17" ); /*--------------------------------------------------------------------- diff --git a/lib/parms/src/DDPQ/protos.h b/lib/parms/src/DDPQ/protos.h index 22434772a..507b240e7 100755 --- a/lib/parms/src/DDPQ/protos.h +++ b/lib/parms/src/DDPQ/protos.h @@ -94,7 +94,7 @@ extern void qsortC(int *ja, FLOAT *ma, int left, int right, int abval); extern void qsortR2I(double *wa, int *cor1, int *cor2, int left, int right); -extern int qsplitC(FLOAT *a, int *ind, int n, int ncut); +extern int qsplitCF(FLOAT *a, int *ind, int n, int ncut); extern int roscalC(csptr mata, double *diag, int nrm); extern void swapj(int v[], int i, int j); extern void swapm(FLOAT v[], int i, int j); diff --git a/lib/parms/src/DDPQ/systimer.c b/lib/parms/src/DDPQ/systimer.c new file mode 100644 index 000000000..e10a154b4 --- /dev/null +++ 
b/lib/parms/src/DDPQ/systimer.c @@ -0,0 +1,9 @@ +#include +#include + +/* Missing sys_timer for shared libraries */ +double sys_timer() { + clock_t t; + t = clock(); + return ((double)t) / CLOCKS_PER_SEC; +} diff --git a/lib/parms/src/parms_ilu_vcsr.c b/lib/parms/src/parms_ilu_vcsr.c index e9dd64c77..116aa51ba 100755 --- a/lib/parms/src/parms_ilu_vcsr.c +++ b/lib/parms/src/parms_ilu_vcsr.c @@ -16,7 +16,7 @@ typedef struct parms_ilu_data { } *parms_ilu_data; /* -int qsplitC(FLOAT *a, int *ind, int n, int ncut); +int qsplitCF(FLOAT *a, int *ind, int n, int ncut); void qqsort(int *ja, FLOAT *ma, int left, int right); */ @@ -1436,7 +1436,7 @@ int parms_ilut_vcsr(parms_Mat self, parms_FactParam param, void *mat, #endif /* quick sort */ if (len > lenl) { - qsplitC(w,jw,len,lenl); + qsplitCF(w,jw,len,lenl); } if (lenl > 0) { @@ -1471,7 +1471,7 @@ int parms_ilut_vcsr(parms_Mat self, parms_FactParam param, void *mat, lenu = len + 1 > fill ? fill: len + 1; jpos = lenu - 1; if (len > jpos) - qsplitC(&w[ii+start+1], &jw[ii+start+1], len, jpos); + qsplitCF(&w[ii+start+1], &jw[ii+start+1], len, jpos); data->U->nnzrow[start+ii] = lenu; diff --git a/lib/parms/src/parms_pc_schurras.c b/lib/parms/src/parms_pc_schurras.c index bb37885e6..979acdfe9 100755 --- a/lib/parms/src/parms_pc_schurras.c +++ b/lib/parms/src/parms_pc_schurras.c @@ -324,6 +324,9 @@ int parms_PCCreate_Schurras(parms_PC self) +int parms_OperatorGetU(parms_Operator, void **); +int parms_MatGetOffDiag(parms_Mat, void **); +int parms_CommGetOdvlist(parms_Comm, int **); static int parms_PC_GetS(parms_PC self, parms_Operator op,parms_Mat *mat) { diff --git a/lib/parms/src/parms_qsplit.c b/lib/parms/src/parms_qsplit.c index 472c042b0..55cbb8e4b 100755 --- a/lib/parms/src/parms_qsplit.c +++ b/lib/parms/src/parms_qsplit.c @@ -6,7 +6,7 @@ #include #endif -int qsplitC(FLOAT *a, int *ind, int n, int ncut) +int qsplitCF(FLOAT *a, int *ind, int n, int ncut) { 
/*---------------------------------------------------------------------- | does a quick-sort split of a real array. diff --git a/mesh_part/CMakeLists.txt b/mesh_part/CMakeLists.txt index 81bbe0cc4..54cdc4b59 100644 --- a/mesh_part/CMakeLists.txt +++ b/mesh_part/CMakeLists.txt @@ -4,7 +4,7 @@ project(fesom_ini C Fortran) # get our source files set(src_home ${CMAKE_CURRENT_LIST_DIR}/../src) -set(sources_Fortran ${src_home}/MOD_MESH.F90 ${src_home}/oce_modules.F90 ${src_home}/gen_modules_config.F90 ${src_home}/gen_modules_partitioning.F90 ${src_home}/gen_modules_rotate_grid.F90 ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90) +set(sources_Fortran ${src_home}/MOD_MESH.F90 ${src_home}/oce_modules.F90 ${src_home}/gen_modules_config.F90 ${src_home}/gen_modules_partitioning.F90 ${src_home}/gen_modules_rotate_grid.F90 ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90 ${src_home}/MOD_READ_BINARY_ARRAYS.F90 ${src_home}/MOD_WRITE_BINARY_ARRAYS.F90 ${src_home}/MOD_PARTIT.F90) set(sources_C ${src_home}/fort_part.c) diff --git a/setups/paths.yml b/setups/paths.yml index b352c7473..963785b8c 100644 --- a/setups/paths.yml +++ b/setups/paths.yml @@ -54,17 +54,34 @@ docker: lnodename: - ' ' meshes: - pi: /fesom/pi/ test_souf: ./test/meshes/soufflet/ test_global: ./test/meshes/pi/ forcing: - CORE2: /fesom/dCORE2/ - JRA55: /fesom/dJRA55/ test_global: ./test/input/global/ clim: - phc: /fesom/phc3/ test_global: ./test/input/global/ opath: - opath: ../results/ + opath: ./test/ +juwels: + lnodename: + - 'jwlogin*' + meshes: + test_global: ./test/meshes/pi/ + test_souf: ./test/meshes/soufflet/ + pi: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/pi/ + core2: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/core2/ + mr: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/mr/ + hr: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/hr/ + orca25: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/orca25/ + farc: 
/p/project/chhb19/hhb192/meshes/POOL/FESOM2/meshes/farc/ + forcing: + test_global: ./test/input/global/ + CORE2: /p/project/chhb19/hhb192/forcing/forcing/CORE2/ + JRA55: /p/project/chhb19/hhb192/forcing/forcing/JRA55-do-v1.4.0/ + clim: + test_global: ./test/input/global/ + phc: /p/project/chhb19/hhb192/meshes/POOL/FESOM2/hydrography/phc3.0/ + opath: + opath: /p/scratch/chhb19/${USER}/ diff --git a/setups/test_pi/setup.yml b/setups/test_pi/setup.yml index e7f20c760..8856bcb4f 100644 --- a/setups/test_pi/setup.yml +++ b/setups/test_pi/setup.yml @@ -19,11 +19,9 @@ namelist.config: restart_length_unit: "d" logfile_outfreq: 10 -namelist.oce: - oce_dyn: - Div_c: 0.5 - Leith_c: 0.05 - w_split: True +namelist.dyn: + dynamics_general: + use_wsplit: True namelist.ice: ice_dyn: @@ -61,17 +59,12 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.26911274194532003 - salt: 23.9440531023692 - temp: 1.7017743034836539 - sst: 8.532529081624512 - u: -0.0014065854610620704 - v: 0.00014195144238082126 - - - - - + a_ice: 0.2692498167543513 + salt: 23.944089812055452 + sst: 8.526792796340805 + temp: 1.7018189804276316 + u: -0.0014310701355284717 + v: 0.00014314237674481877 diff --git a/setups/test_pi_floatice/setup.yml b/setups/test_pi_floatice/setup.yml new file mode 100644 index 000000000..5a0efdaf6 --- /dev/null +++ b/setups/test_pi_floatice/setup.yml @@ -0,0 +1,78 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + run_config: + use_floatice: True + +namelist.dyn: + dynamics_general: + use_wsplit: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + 
freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.2688036133133268 + salt: 23.943629925697905 + sst: 8.509603317707892 + temp: 1.7010248191306683 + u: -0.005721009166639396 + v: 0.00047684416150415605 + + + + + + + + + diff --git a/setups/test_pi_icepack/setup.yml b/setups/test_pi_icepack/setup.yml index 9180b3d59..3bc1c4deb 100644 --- a/setups/test_pi_icepack/setup.yml +++ b/setups/test_pi_icepack/setup.yml @@ -19,11 +19,9 @@ namelist.config: restart_length_unit: "d" logfile_outfreq: 10 -namelist.oce: - oce_dyn: - Div_c: 0.5 - Leith_c: 0.05 - w_split: True +namelist.dyn: + dynamics_general: + use_wsplit: True namelist.ice: ice_dyn: @@ -73,13 +71,14 @@ namelist.io: prec: 8 fcheck: - a_ice: 0.3059942958760058 - salt: 23.866224273520945 - temp: 1.7172059436119271 - sst: 8.725966058658427 - u: -0.0014448488412238854 - v: 0.00018596541127645607 - aicen: 0.061198859175201174 + a_ice: 0.30599544390558286 + aicen: 0.061199088781116566 + salt: 23.866195697592563 + sst: 8.725992728181598 + temp: 1.717206734648259 + u: -0.001444895079544947 + v: 0.00018599946919795504 + diff --git a/setups/test_pi_linfs/setup.yml b/setups/test_pi_linfs/setup.yml new file mode 100644 index 000000000..aaa6d0999 --- /dev/null +++ b/setups/test_pi_linfs/setup.yml @@ -0,0 +1,78 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + ale_def: + which_ALE: "linfs" + +namelist.dyn: + dynamics_general: + use_wsplit: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + 
evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.268577839695243 + salt: 23.94451194254492 + sst: 8.517818738095748 + temp: 1.701104466912738 + u: -0.001308996954725246 + v: 0.0001316762592120162 + + + + + + + + + diff --git a/setups/test_pi_partial/setup.yml b/setups/test_pi_partial/setup.yml new file mode 100644 index 000000000..33ffd2d71 --- /dev/null +++ b/setups/test_pi_partial/setup.yml @@ -0,0 +1,78 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + ale_def: + use_partial_cell: False + +namelist.dyn: + dynamics_general: + use_wsplit: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.26912709492615366 + salt: 23.944033079753975 + sst: 8.5316133160601 + temp: 1.701462830461885 + u: -0.0014154222939901564 + v: 0.00013995648270483183 + + + + + + + + + diff --git a/setups/test_pi_visc7/setup.yml b/setups/test_pi_visc7/setup.yml new file mode 100644 index 000000000..bed9e3451 --- /dev/null +++ b/setups/test_pi_visc7/setup.yml @@ -0,0 +1,79 @@ +mesh: test_global +forcing: 
test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + +namelist.dyn: + dynamics_visc: + opt_visc: 7 + dynamics_general: + use_wsplit: True + + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 + "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.26912762639046617 + salt: 23.9440246569731 + sst: 8.53153464292271 + temp: 1.701768688625486 + u: -0.0014070898494307966 + v: 0.00014174869736213242 + + + + + + + + + diff --git a/setups/test_pi_zstar/setup.yml b/setups/test_pi_zstar/setup.yml new file mode 100644 index 000000000..3424b4b5a --- /dev/null +++ b/setups/test_pi_zstar/setup.yml @@ -0,0 +1,78 @@ +mesh: test_global +forcing: test_global +clim: + type: test_global + filelist: ['woa18_netcdf_5deg.nc','woa18_netcdf_5deg.nc'] + varlist: ['salt', 'temp'] +ntasks: 2 +time: "00:10:00" + +namelist.config: + timestep: + step_per_day: 96 + run_length: 1 + run_length_unit: "d" + geometry: + force_rotation: True + restart_log: + restart_length: 1 + restart_length_unit: "d" + logfile_outfreq: 10 + ale_def: + which_ALE: "zstar" + +namelist.dyn: + dynamics_general: + use_wsplit: True + +namelist.ice: + ice_dyn: + whichEVP: 1 + evp_rheol_steps: 120 + +namelist.io: + diag_list: + ldiag_energy: False + nml_list: + io_list: + "sst ": + freq: 1 + unit: d + prec: 8 + "a_ice ": + freq: 1 + unit: d + prec: 8 + "temp ": + freq: 1 + unit: d + prec: 8 + "salt ": + freq: 1 + unit: d + prec: 8 
+ "u ": + freq: 1 + unit: d + prec: 8 + "v ": + freq: 1 + unit: d + prec: 8 + +fcheck: + a_ice: 0.26912765975496816 + salt: 23.944024679315966 + sst: 8.531528641557886 + temp: 1.7017687500626169 + u: -0.0014072137916283753 + v: 0.0001418460244606028 + + + + + + + + + diff --git a/setups/test_souf/setup.yml b/setups/test_souf/setup.yml index be91281af..2ddb4a5e2 100644 --- a/setups/test_souf/setup.yml +++ b/setups/test_souf/setup.yml @@ -36,18 +36,22 @@ namelist.config: namelist.oce: oce_dyn: state_equation: 0 - Div_c: 0.5 - Leith_c: 0.05 - w_split: False Fer_GM: False Redi: False mix_scheme: "PP" - oce_tra: + +namelist.dyn: + dynamics_general: + use_wsplit: False + +namelist.tra: + tracer_phys: use_momix: False K_hor: 10 surf_relax_S: 0.0 balance_salt_water: False + namelist.ice: ice_dyn: whichEVP: 0 @@ -81,8 +85,8 @@ namelist.io: fcheck: salt: 35.0 - temp: 14.329708493584528 - sst: 18.939699611886315 - u: 0.027431619139927817 - v: -0.0008870629489199685 + temp: 14.329708416524904 + sst: 18.939699613817496 + u: 0.0274316731683607 + v: -0.0008870790518593145 diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 28811319d..3d8a5b716 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,9 +1,43 @@ -cmake_minimum_required(VERSION 3.4) -set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9") +cmake_minimum_required(VERSION 3.9) project(fesom C Fortran) +if(DEFINED ENV{FESOM_PLATFORM_STRATEGY}) + set(FESOM_PLATFORM_STRATEGY $ENV{FESOM_PLATFORM_STRATEGY} CACHE STRING "switch to platform specific compile settings, this is usually determined via the env.sh script") +else() + set(FESOM_PLATFORM_STRATEGY "notset" CACHE STRING "switch to platform specific compile settings, this is usually determined via the env.sh script") +endif() + +if(DEFINED ENV{ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS}) # be able to set the initial cache value from our env settings for aleph, not only via cmake command + option(ALEPH_CRAYMPICH_WORKAROUNDS "workaround for performance issues on aleph" ON) +else() + 
option(ALEPH_CRAYMPICH_WORKAROUNDS "workaround for performance issues on aleph" OFF) +endif() + +if(ALEPH_CRAYMPICH_WORKAROUNDS) + # todo: enable these options only for our targets + add_compile_options(-craympich-mt) # alternative cray-mpich library, about 5 % faster with cray-mpich/7.7.3 on aleph, not available for modules cray-mpich > 7.7.3; todo: test compiling and performance with cray-mpich > 7.7.3 + # make sure to also set these variables in the runtime environment: + # MPICH_MAX_THREAD_SAFETY=multiple # allows highest MPI thread level (i.e. MPI_THREAD_MULTIPLE) + # MPICH_CRAY_OPT_THREAD_SYNC=0 # the Cray MPICH library falls back to using the pthread_mutex-based thread-synchronization implementation + # MPICH_OPT_THREAD_SYNC=0 # seems to be a duplicate variable which also appears in some documentation instead of MPICH_CRAY_OPT_THREAD_SYNC (but this one brings a huge speed gain on aleph) + #add_compile_options(-DDISABLE_PARALLEL_RESTART_READ) # reading restarts is slow when doing it on parallel on aleph, switch it off for now + add_compile_options(-DENABLE_ALEPH_CRAYMPICH_WORKAROUNDS) +endif() + option(DISABLE_MULTITHREADING "disable asynchronous operations" OFF) +option(ENABLE_OPENACC "compile with OpenACC support" OFF) +set(NV_GPU_ARCH "cc80" CACHE STRING "GPU arch for nvfortran compiler (cc35,cc50,cc60,cc70,cc80,...)") + +option(ENABLE_OPENMP "build FESOM with OpenMP" OFF) +if(${ENABLE_OPENMP}) + find_package(OpenMP REQUIRED) +endif() + +# option to trigger building a library version of FESOM +# we do not always build the library along with the executable to avoid having two targets here in the CMakeLists.txt +# two targets would allow e.g. setting different compiler options or preprocessor definition, which would be error prone. 
+option(BUILD_FESOM_AS_LIBRARY "Build a library instead of an executable" OFF) # get our source files set(src_home ${CMAKE_CURRENT_LIST_DIR}) # path to src directory starting from the dir containing our CMakeLists.txt @@ -11,11 +45,15 @@ if(${USE_ICEPACK}) file(GLOB sources_Fortran ${src_home}/*.F90 ${src_home}/icepack_drivers/*.F90 ${src_home}/icepack_drivers/Icepack/columnphysics/*.F90) +elseif(${BUILD_FESOM_AS_LIBRARY}) + file(GLOB sources_Fortran ${src_home}/*.F90 + ${src_home}/ifs_interface/*.F90) # ICEPACK + LIBRARY NOT SUPPORTED (YET) else() file(GLOB sources_Fortran ${src_home}/*.F90) endif() #list(REMOVE_ITEM sources_Fortran ${src_home}/fesom_partition_init.F90) file(GLOB sources_C ${src_home}/*.c) +list(REMOVE_ITEM sources_C ${src_home}/psolve_feom.c) # does the file still exist? # generate a custom file from fesom_version_info.F90 which includes the current git SHA set(FESOM_ORIGINAL_VERSION_FILE ${src_home}/fesom_version_info.F90) @@ -30,8 +68,9 @@ add_custom_command(OUTPUT 5303B6F4_E4F4_45B2_A6E5_8E2B9FB5CDC4 ${FESOM_GENERATED #if(${FESOM_STANDALONE}) # list(REMOVE_ITEM sources_Fortran ${src_home}/cpl_driver.F90) #endif() -list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90) +list(REMOVE_ITEM sources_Fortran ${src_home}/fvom_init.F90 ${src_home}/oce_local.F90 ${src_home}/gen_comm.F90) list(REMOVE_ITEM sources_C ${src_home}/fort_part.c) +list(REMOVE_ITEM sources_Fortran ${src_home}/fesom_main.F90) # depends on the metis library #add_subdirectory(../lib/metis-5.1.0 ${PROJECT_BINARY_DIR}/metis) @@ -43,12 +82,23 @@ add_subdirectory(async_threads_cpp) include(${CMAKE_CURRENT_LIST_DIR}/../cmake/FindNETCDF.cmake) +if(${BUILD_FESOM_AS_LIBRARY}) + add_library(${PROJECT_NAME}_C ${sources_C}) +else() add_library(${PROJECT_NAME}_C ${sources_C}) +endif() target_compile_definitions(${PROJECT_NAME}_C PRIVATE PARMS USE_MPI REAL=double DBL HAS_BLAS FORTRAN_UNDERSCORE VOID_POINTER_SIZE_8 SGI LINUX UNDER_ MPI2) target_link_libraries(${PROJECT_NAME}_C parms) #metis 
-# create our binary (set its name to name of this project) -add_executable(${PROJECT_NAME} ${sources_Fortran}) + +# create our binary or library (set its name to name of this project) +# we do not always build the library along with the executable to avoid having two targets here in the CMakeLists.txt +# two targets would allow e.g. setting different compiler options or preprocessor definition, which would be error prone. +if(${BUILD_FESOM_AS_LIBRARY}) + add_library(${PROJECT_NAME} ${sources_Fortran}) +else() + add_executable(${PROJECT_NAME} ${sources_Fortran} ${src_home}/fesom_main.F90) +endif() target_compile_definitions(${PROJECT_NAME} PRIVATE PARMS -DMETIS_VERSION=5 -DPART_WEIGHTED -DMETISRANDOMSEED=35243) if(${DISABLE_MULTITHREADING}) target_compile_definitions(${PROJECT_NAME} PRIVATE DISABLE_MULTITHREADING) @@ -63,20 +113,56 @@ endif() if(${USE_ICEPACK}) target_compile_definitions(${PROJECT_NAME} PRIVATE __icepack) endif() +if(${BUILD_FESOM_AS_LIBRARY}) + target_compile_definitions(${PROJECT_NAME} PRIVATE __ifsinterface) +endif() if(${VERBOSE}) target_compile_definitions(${PROJECT_NAME} PRIVATE VERBOSE) endif() +if(${OPENMP_REPRODUCIBLE}) + target_compile_definitions(${PROJECT_NAME} PRIVATE __openmp_reproducible) +endif() # CMAKE_Fortran_COMPILER_ID will also work if a wrapper is being used (e.g. 
mpif90 wraps ifort -> compiler id is Intel) if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) - target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip -init=zero -no-wrap-margin) -# target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) + if(${BUILD_FESOM_AS_LIBRARY}) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -init=zero -no-wrap-margin -fpe0) # add -fpe0 for RAPS environment + else() + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -ip -init=zero -no-wrap-margin) + endif() +# target_compile_options(${PROJECT_NAME} PRIVATE -qopenmp -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fimf-use-svml -xHost -ip -g -traceback -check all,noarg_temp_created,bounds,uninit ) #-ftrapuv ) #-init=zero) + if(${FESOM_PLATFORM_STRATEGY} STREQUAL levante.dkrz.de ) + target_compile_options(${PROJECT_NAME} PRIVATE -march=core-avx2 -mtune=core-avx2) + else() + target_compile_options(${PROJECT_NAME} PRIVATE -xHost) + endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) - target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) - if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) - target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" - endif() +# target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + 
target_compile_options(${PROJECT_NAME} PRIVATE -O2 -g -ffloat-store -finit-local-zero -finline-functions -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) + target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + endif() elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) - target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64) + if(${ENABLE_OPENMP}) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -N 1023 -homp) + else() + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64 -N 1023 -hnoomp) + endif() +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL NVHPC ) + target_compile_options(${PROJECT_NAME} PRIVATE -fast -fastsse -O3 -Mallocatable=95 -Mr8 -pgf90libs) + if(${ENABLE_OPENACC}) + # additional compiler settings + target_compile_options(${PROJECT_NAME} PRIVATE -acc -ta=tesla:${NV_GPU_ARCH} -Minfo=accel) + set(CMAKE_EXE_LINKER_FLAGS "-acc -ta=tesla:${NV_GPU_ARCH}") + endif() + if(${ENABLE_OPENMP}) + target_compile_options(${PROJECT_NAME} PRIVATE -Mipa=fast) + else() + target_compile_options(${PROJECT_NAME} PRIVATE -Mipa=fast,inline) + endif() +endif() +if(${BUILD_FESOM_AS_LIBRARY}) + target_compile_options(${PROJECT_NAME} PRIVATE -fPIC) + target_compile_options(${PROJECT_NAME}_C PRIVATE -fPIC) endif() target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES} ${OASIS_Fortran_INCLUDE_DIRECTORIES}) target_include_directories(${PROJECT_NAME} PRIVATE ${MCT_Fortran_INCLUDE_DIRECTORIES} ${MPEU_Fortran_INCLUDE_DIRECTORIES}) @@ -85,8 +171,19 @@ 
target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${NETCDF_Fortran_LIBRARI target_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}_C ${MCT_Fortran_LIBRARIES} ${MPEU_Fortran_LIBRARIES} ${SCRIP_Fortran_LIBRARIES}) target_link_libraries(${PROJECT_NAME} async_threads_cpp) set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) +if(${ENABLE_OPENMP} AND NOT ${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray) + target_compile_options(${PROJECT_NAME} PRIVATE ${OpenMP_Fortran_FLAGS}) # currently we only have OpenMP in the Fortran part + target_link_libraries(${PROJECT_NAME} OpenMP::OpenMP_Fortran) +endif() -set(FESOM_INSTALL_FILEPATH "${CMAKE_CURRENT_LIST_DIR}/../bin/fesom.x" CACHE FILEPATH "file path where the FESOM binary should be put") + +set(FESOM_INSTALL_PREFIX "${CMAKE_CURRENT_LIST_DIR}/.." CACHE FILEPATH "directory where FESOM will be installed to via 'make install'") +if(${BUILD_FESOM_AS_LIBRARY}) + install(TARGETS ${PROJECT_NAME} DESTINATION "${FESOM_INSTALL_PREFIX}/lib") + install(TARGETS ${PROJECT_NAME}_C DESTINATION "${FESOM_INSTALL_PREFIX}/lib") +else() + set(FESOM_INSTALL_FILEPATH "${FESOM_INSTALL_PREFIX}/bin/fesom.x") get_filename_component(FESOM_INSTALL_PATH ${FESOM_INSTALL_FILEPATH} DIRECTORY) get_filename_component(FESOM_INSTALL_NAME ${FESOM_INSTALL_FILEPATH} NAME) install(PROGRAMS ${PROJECT_BINARY_DIR}/${PROJECT_NAME} DESTINATION ${FESOM_INSTALL_PATH} RENAME ${FESOM_INSTALL_NAME}) +endif() diff --git a/src/MOD_DYN.F90 b/src/MOD_DYN.F90 new file mode 100644 index 000000000..23ee1b243 --- /dev/null +++ b/src/MOD_DYN.F90 @@ -0,0 +1,296 @@ +!========================================================== +MODULE MOD_DYN +USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE + +! +! 
+!_______________________________________________________________________________ +TYPE T_SOLVERINFO + integer :: ident = 1 + integer :: maxiter = 2000 + integer :: restart = 15 + integer :: fillin = 3 + integer :: lutype = 2 + real(kind=WP) :: droptol = 1.e-8 +!!! PARMS Solver + real(kind=WP) :: soltol = 1e-10 ! default for PARMS + logical :: use_parms = .TRUE. +!!! +!!! Sergey's Solver +! real(kind=WP) :: soltol = 1e-5 ! default for PARMS +! logical :: use_parms = .FALSE. +!!! + real(kind=WP), allocatable :: rr(:), zz(:), pp(:), App(:) + contains +#if defined(__PGI) + private +#endif + procedure WRITE_T_SOLVERINFO + procedure READ_T_SOLVERINFO + generic :: write(unformatted) => WRITE_T_SOLVERINFO + generic :: read(unformatted) => READ_T_SOLVERINFO +END TYPE T_SOLVERINFO +! +! +!_______________________________________________________________________________ +TYPE T_DYN_WORK + real(kind=WP), allocatable, dimension(:,:,:) :: uvnode_rhs + real(kind=WP), allocatable, dimension(:,:) :: u_c, v_c + + ! easy backscatter contribution + real(kind=WP), allocatable, dimension(:,:) :: u_b, v_b + contains +#if defined(__PGI) + private +#endif + procedure WRITE_T_DYN_WORK + procedure READ_T_DYN_WORK + generic :: write(unformatted) => WRITE_T_DYN_WORK + generic :: read(unformatted) => READ_T_DYN_WORK +END TYPE T_DYN_WORK +! +! +!_______________________________________________________________________________ +! set main structure for dynamicss, contains viscosity options and parameters + +! option for momentum advection +TYPE T_DYN +!___________________________________________________________________________ + ! instant zonal merdional velocity & Adams-Bashfort rhs + real(kind=WP), allocatable, dimension(:,:,:):: uv, uv_rhs, uv_rhsAB, fer_uv + + ! horizontal velocities at nodes + real(kind=WP), allocatable, dimension(:,:,:):: uvnode + + ! instant vertical vel arrays + real(kind=WP), allocatable, dimension(:,:) :: w, w_e, w_i, cfl_z, fer_w + + ! 
sea surface height arrays + real(kind=WP), allocatable, dimension(:) :: eta_n, d_eta, ssh_rhs, ssh_rhs_old + + !___________________________________________________________________________ + ! summarizes solver input parameter + type(t_solverinfo) :: solverinfo + + !___________________________________________________________________________ + ! put dynmiacs working arrays + type(t_dyn_work) :: work + + !___________________________________________________________________________ + ! opt_visc=... + ! 5=Kinematic (easy) Backscatter + ! 6=Biharmonic flow aware (viscosity depends on velocity Laplacian) + ! 7=Biharmonic flow aware (viscosity depends on velocity differences) + ! 8=Dynamic Backscatter + integer :: opt_visc = 5 + + ! gamma0 [m/s], backgroung viscosity= gamma0*len, it should be as small + ! as possible (keep it < 0.01 m/s). + ! gamma1 [nodim], for computation of the flow aware viscosity + ! gamma2 [s/m], is only used in easy backscatter option + real(kind=WP) :: visc_gamma0 = 0.03 + real(kind=WP) :: visc_gamma1 = 0.1 + real(kind=WP) :: visc_gamma2 = 0.285 + + ! coefficient for returned sub-gridscale energy, to be used with opt_visc=5 + ! (easy backscatter) + real(kind=WP) :: visc_easybsreturn = 1.5 + + logical :: use_ivertvisc = .true. + integer :: momadv_opt = 2 + + ! Switch on free slip + logical :: use_freeslip = .false. + + ! do implicite, explicite spliting of vertical velocity + logical :: use_wsplit = .false. + ! maximum allowed CFL criteria in vertical (0.5 < w_max_cfl < 1.) + ! in older FESOM it used to be w_exp_max=1.e-3 + real(kind=WP) :: wsplit_maxcfl= 1.0 + + !___________________________________________________________________________ + contains +#if defined(__PGI) + private +#endif + procedure WRITE_T_DYN + procedure READ_T_DYN + generic :: write(unformatted) => WRITE_T_DYN + generic :: read(unformatted) => READ_T_DYN +END TYPE T_DYN + +contains + +! +! +!_______________________________________________________________________________ +! 
set unformatted writing and reading for T_DYN_WORK +subroutine WRITE_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) + IMPLICIT NONE + class(T_SOLVERINFO), intent(in) :: tsolverinfo + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%ident + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%maxiter + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%restart + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%fillin + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%lutype + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%droptol + write(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%soltol + call write_bin_array(tsolverinfo%rr, unit, iostat, iomsg) + call write_bin_array(tsolverinfo%zz, unit, iostat, iomsg) + call write_bin_array(tsolverinfo%pp, unit, iostat, iomsg) + call write_bin_array(tsolverinfo%App, unit, iostat, iomsg) +end subroutine WRITE_T_SOLVERINFO + +subroutine READ_T_SOLVERINFO(tsolverinfo, unit, iostat, iomsg) + IMPLICIT NONE + class(T_SOLVERINFO), intent(inout) :: tsolverinfo + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%ident + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%maxiter + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%restart + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%fillin + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%lutype + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%droptol + read(unit, iostat=iostat, iomsg=iomsg) tsolverinfo%soltol + call read_bin_array(tsolverinfo%rr, unit, iostat, iomsg) + call read_bin_array(tsolverinfo%zz, unit, iostat, iomsg) + call read_bin_array(tsolverinfo%pp, unit, iostat, iomsg) + call read_bin_array(tsolverinfo%App, unit, iostat, iomsg) +end subroutine READ_T_SOLVERINFO + 
+! +! +!_______________________________________________________________________________ +! set unformatted writing and reading for T_DYN_WORK +subroutine WRITE_T_DYN_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN_WORK), intent(in) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call write_bin_array(twork%uvnode_rhs, unit, iostat, iomsg) + call write_bin_array(twork%u_c, unit, iostat, iomsg) + call write_bin_array(twork%v_c, unit, iostat, iomsg) + call write_bin_array(twork%u_b, unit, iostat, iomsg) + call write_bin_array(twork%v_b, unit, iostat, iomsg) +end subroutine WRITE_T_DYN_WORK + +subroutine READ_T_DYN_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN_WORK), intent(inout) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call read_bin_array(twork%uvnode_rhs, unit, iostat, iomsg) + call read_bin_array(twork%u_c, unit, iostat, iomsg) + call read_bin_array(twork%v_c, unit, iostat, iomsg) + call read_bin_array(twork%u_b, unit, iostat, iomsg) + call read_bin_array(twork%v_b, unit, iostat, iomsg) +end subroutine READ_T_DYN_WORK + +! +! +!_______________________________________________________________________________ +! 
set unformatted writing and reading for T_DYN +subroutine WRITE_T_DYN(dynamics, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN), intent(in) :: dynamics + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%opt_visc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma0 + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma1 + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma2 + write(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_easybsreturn + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc + write(unit, iostat=iostat, iomsg=iomsg) dynamics%momadv_opt + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_freeslip + write(unit, iostat=iostat, iomsg=iomsg) dynamics%use_wsplit + write(unit, iostat=iostat, iomsg=iomsg) dynamics%wsplit_maxcfl + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%solverinfo + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) dynamics%work + + !___________________________________________________________________________ + call write_bin_array(dynamics%uv , unit, iostat, iomsg) + call write_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) + call write_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) + call write_bin_array(dynamics%uvnode , unit, iostat, iomsg) + call write_bin_array(dynamics%w , unit, iostat, iomsg) + call write_bin_array(dynamics%w_e , unit, iostat, iomsg) + call write_bin_array(dynamics%w_i , unit, iostat, iomsg) + call write_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + if (Fer_GM) then 
+ call write_bin_array(dynamics%fer_w , unit, iostat, iomsg) + call write_bin_array(dynamics%fer_uv, unit, iostat, iomsg) + end if + + +end subroutine WRITE_T_DYN + +subroutine READ_T_DYN(dynamics, unit, iostat, iomsg) + IMPLICIT NONE + class(T_DYN), intent(inout) :: dynamics + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%opt_visc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma0 + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma1 + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_gamma2 + read(unit, iostat=iostat, iomsg=iomsg) dynamics%visc_easybsreturn + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_ivertvisc + read(unit, iostat=iostat, iomsg=iomsg) dynamics%momadv_opt + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_freeslip + read(unit, iostat=iostat, iomsg=iomsg) dynamics%use_wsplit + read(unit, iostat=iostat, iomsg=iomsg) dynamics%wsplit_maxcfl + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%solverinfo + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) dynamics%work + + !___________________________________________________________________________ + call read_bin_array(dynamics%uv , unit, iostat, iomsg) + call read_bin_array(dynamics%uv_rhs , unit, iostat, iomsg) + call read_bin_array(dynamics%uv_rhsAB , unit, iostat, iomsg) + call read_bin_array(dynamics%uvnode , unit, iostat, iomsg) + call read_bin_array(dynamics%w , unit, iostat, iomsg) + call read_bin_array(dynamics%w_e , unit, iostat, iomsg) + call read_bin_array(dynamics%w_i , 
unit, iostat, iomsg) + call read_bin_array(dynamics%cfl_z , unit, iostat, iomsg) + if (Fer_GM) then + call read_bin_array(dynamics%fer_w , unit, iostat, iomsg) + call read_bin_array(dynamics%fer_uv , unit, iostat, iomsg) + end if + +end subroutine READ_T_DYN + +END MODULE MOD_DYN diff --git a/src/MOD_ICE.F90 b/src/MOD_ICE.F90 new file mode 100644 index 000000000..8b5d0ae84 --- /dev/null +++ b/src/MOD_ICE.F90 @@ -0,0 +1,826 @@ +MODULE MOD_ICE +USE o_PARAM, only: WP +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE + +! +! +!_______________________________________________________________________________ +! set data array derived type for ice-tracers (area, mice, msnow) more tracer +! are theretical possible +TYPE T_ICE_DATA + !___________________________________________________________________________ + real(kind=WP), allocatable, dimension(:) :: values, values_old, values_rhs, & + values_div_rhs, dvalues, valuesl + integer :: ID + !___________________________________________________________________________ + contains +#if defined(__PGI) + private +#endif + procedure WRITE_T_ICE_DATA + procedure READ_T_ICE_DATA + generic :: write(unformatted) => WRITE_T_ICE_DATA + generic :: read(unformatted) => READ_T_ICE_DATA +END TYPE T_ICE_DATA +! +! +!_______________________________________________________________________________ +! 
set work array derived type for ice +TYPE T_ICE_WORK + !___________________________________________________________________________ + real(kind=WP), allocatable, dimension(:) :: fct_tmax, fct_tmin + real(kind=WP), allocatable, dimension(:) :: fct_plus, fct_minus + real(kind=WP), allocatable, dimension(:,:) :: fct_fluxes + real(kind=WP), allocatable, dimension(:) :: fct_massmatrix + real(kind=WP), allocatable, dimension(:) :: sigma11, sigma12, sigma22 + real(kind=WP), allocatable, dimension(:) :: eps11, eps12, eps22 + real(kind=WP), allocatable, dimension(:) :: ice_strength, inv_areamass, inv_mass + !___________________________________________________________________________ + contains +#if defined(__PGI) + private +#endif + procedure WRITE_T_ICE_WORK + procedure READ_T_ICE_WORK + generic :: write(unformatted) => WRITE_T_ICE_WORK + generic :: read(unformatted) => READ_T_ICE_WORK +END TYPE T_ICE_WORK +! +! +!_______________________________________________________________________________ +! set work array derived type for ice +TYPE T_ICE_THERMO + !___________________________________________________________________________ + real(kind=WP), allocatable, dimension(:) :: t_skin, thdgr, thdgrsn, thdgr_old, ustar + !___________________________________________________________________________ + real(kind=WP) :: rhoair=1.3 , inv_rhoair=1./1.3 ! Air density & inverse , LY2004 !1.3 AOMIP + real(kind=WP) :: rhowat=1025., inv_rhowat=1./1025.! Water density & inverse + real(kind=WP) :: rhoice=910. , inv_rhoice=1./910. ! Ice density & inverse, AOMIP + real(kind=WP) :: rhosno=290. , inv_rhosno=1./290. ! Snow density & inverse, AOMIP + ! Specific heat of air, ice, snow [J/(kg * K)] + real(kind=WP) :: cpair=1005., cpice=2106., cpsno=2090. +! real(kind=WP) :: cc=rhowat*4190.0 ! Volumetr. heat cap. of water [J/m**3/K](cc = rhowat*cp_water) +! real(kind=WP) :: cl=rhoice*3.34e5 ! Volumetr. latent heat of ice fusion [J/m**3](cl=rhoice*Lf) +! 
--> cl and cc are setted in subroutine ice_init(...) + real(kind=WP) :: cc=1025.*4190.0 ! Volumetr. heat cap. of water [J/m**3/K](cc = rhowat*cp_water) + real(kind=WP) :: cl=910.*3.34e5 ! Volumetr. latent heat of ice fusion [J/m**3](cl=rhoice*Lf) + real(kind=WP) :: clhw=2.501e6 ! Specific latent heat [J/kg]: water -> water vapor + real(kind=WP) :: clhi=2.835e6 ! sea ice-> water vapor + real(kind=WP) :: tmelt=273.15 ! 0 deg C expressed in K + real(kind=WP) :: boltzmann=5.67E-8 ! S. Boltzmann const.*longw. emissivity + integer :: iclasses=7 ! Number of ice thickness gradations for ice growth calcs. + real(kind=WP) :: hmin= 0.01 ! Cut-off ice thickness !! + real(kind=WP) :: Armin=0.01 ! Minimum ice concentration !! + + ! --- namelist parameter /ice_therm/ + real(kind=WP) :: con= 2.1656, consn = 0.31 ! Thermal conductivities: ice & snow; W/m/K + real(kind=WP) :: Sice = 4.0 ! Ice salinity 3.2--5.0 ppt. + real(kind=WP) :: h0=1.0 ! Lead closing parameter [m] ! 0.5 + real(kind=WP) :: emiss_ice=0.97 ! Emissivity of Snow/Ice, + real(kind=WP) :: emiss_wat=0.97 ! Emissivity of open water + real(kind=WP) :: albsn = 0.81 ! Albedo: frozen snow + real(kind=WP) :: albsnm= 0.77 ! melting snow + real(kind=WP) :: albi = 0.70 ! frozen ice + real(kind=WP) :: albim = 0.68 ! melting ice + real(kind=WP) :: albw = 0.066 ! open water, LY2004 + contains +#if defined(__PGI) + private +#endif + procedure WRITE_T_ICE_THERMO + procedure READ_T_ICE_THERMO + generic :: write(unformatted) => WRITE_T_ICE_THERMO + generic :: read(unformatted) => READ_T_ICE_THERMO +END TYPE T_ICE_THERMO +! +! +!_______________________________________________________________________________ +! 
set work array derived type for ice +#if defined (__oasis) || defined (__ifsinterface) +TYPE T_ICE_ATMCOUPL + + !___________________________________________________________________________ + real(kind=WP), allocatable, dimension(:) :: oce_flx_h, ice_flx_h, tmpoce_flx_h, tmpice_flx_h +#if defined (__oifs) || defined (__ifsinterface) + !___________________________________________________________________________ + real(kind=WP), allocatable, dimension(:) :: ice_alb, enthalpyoffuse + ! !!! DONT FORGET ice_temp rhs_tempdiv rhs_temp is advected for oifs !!! --> becomes additional ice + ! tracer in ice%data(4)%values +#endif /* (__oifs) */ + !___________________________________________________________________________ + contains +#if defined(__PGI) + private +#endif + procedure WRITE_T_ICE_ATMCOUPL + procedure READ_T_ICE_ATMCOUPL + generic :: write(unformatted) => WRITE_T_ICE_ATMCOUPL + generic :: read(unformatted) => READ_T_ICE_ATMCOUPL +END TYPE T_ICE_ATMCOUPL +#endif /* (__oasis) */ + +! +! +!_______________________________________________________________________________ +! set main ice derived type contains parameters, data array, work array, u_ice, vice +TYPE T_ICE + + !___________________________________________________________________________ + ! zonal & merdional ice velocity + real(kind=WP), allocatable, dimension(:) :: uice, uice_rhs, uice_old, uice_aux + real(kind=WP), allocatable, dimension(:) :: vice, vice_rhs, vice_old, vice_aux + + ! surface stess atm<-->ice, oce<-->ice + real(kind=WP), allocatable, dimension(:) :: stress_atmice_x, stress_iceoce_x + real(kind=WP), allocatable, dimension(:) :: stress_atmice_y, stress_iceoce_y + + ! oce temp, salt, ssh, and uv at surface + real(kind=WP), allocatable, dimension(:) :: srfoce_temp, srfoce_salt, srfoce_ssh +! real(kind=WP), allocatable, dimension(:,:) :: srfoce_uv + real(kind=WP), allocatable, dimension(:) :: srfoce_u, srfoce_v + + ! 
freshwater & heatflux + real(kind=WP), allocatable, dimension(:) :: flx_fw, flx_h + + ! maEVP variables + real(kind=WP), allocatable, dimension(:) :: alpha_evp_array, beta_evp_array + + !___________________________________________________________________________ + ! total number of ice tracers (default=3, 1=area, 2=mice, 3=msnow, (4=ice_temp) +#if defined (__oifs) || defined (__ifsinterface) + integer :: num_itracers=4 +#else + integer :: num_itracers=3 +#endif + + ! put ice tracers data arrays + type(t_ice_data), allocatable, dimension(:) :: data + + !___________________________________________________________________________ + ! put ice working arrays + type(t_ice_work) :: work + + ! put thermodynamics arrays + type(t_ice_thermo) :: thermo + +#if defined (__oasis) || defined (__ifsinterface) + !___________________________________________________________________________ + ! put ice arrays for coupled model + type(t_ice_atmcoupl) :: atmcoupl +#endif /* (__oasis) */ + + !___________________________________________________________________________ + ! set ice model parameters: + ! --- RHEOLOGY --- + real(kind=WP) :: pstar = 30000.0_WP ![N/m^2] + real(kind=WP) :: ellipse = 2.0_WP ! + real(kind=WP) :: c_pressure = 20.0_WP ! + real(kind=WP) :: delta_min = 1.0e-11 ! [s^(-1)] + real(kind=WP) :: Clim_evp = 615 ! kg/m^2 + real(kind=WP) :: zeta_min = 4.0e+8 ! kg/s + integer :: evp_rheol_steps=120 ! EVP rheology cybcycling steps + real(kind=WP) :: ice_gamma_fct=0.25_WP ! smoothing parameter in ice fct advection + real(kind=WP) :: ice_diff = 10.0_WP ! diffusion to stabilize ice advection + real(kind=WP) :: theta_io =0.0_WP ! rotation angle (ice-ocean), available + ! --- in EVP --- + real(kind=WP) :: alpha_evp=250, beta_evp=250 + real(kind=WP) :: c_aevp=0.15 ! 0.1--0.2, but should be adjusted experimentally + ! --- Ice forcing averaging --- + integer :: ice_ave_steps=1 !ice step=ice_ave_steps*oce_step + real(kind=WP) :: cd_oce_ice = 5.5e-3 ! drag coef. 
oce - ice + logical :: ice_free_slip=.false. + integer :: whichEVP=0 ! 0=standart; 1=mEVP; 2=aEVP + + real(kind=WP) :: ice_dt ! ice step=ice_ave_steps*oce_step + real(kind=WP) :: Tevp_inv + + integer :: ice_steps_since_upd=0 + logical :: ice_update = .true. + !___________________________________________________________________________ + contains +#if defined(__PGI) + private +#endif + procedure WRITE_T_ICE + procedure READ_T_ICE + generic :: write(unformatted) => WRITE_T_ICE + generic :: read(unformatted) => READ_T_ICE +END TYPE T_ICE + +contains +! +! +!_______________________________________________________________________________ +! Unformatted writing for T_ICE_DATA +subroutine WRITE_T_ICE_DATA(tdata, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_DATA), intent(in) :: tdata + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call write_bin_array(tdata%values, unit, iostat, iomsg) + call write_bin_array(tdata%values_old, unit, iostat, iomsg) + call write_bin_array(tdata%values_rhs, unit, iostat, iomsg) + call write_bin_array(tdata%values_div_rhs, unit, iostat, iomsg) + call write_bin_array(tdata%dvalues, unit, iostat, iomsg) + call write_bin_array(tdata%valuesl, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tdata%ID +end subroutine WRITE_T_ICE_DATA + +! 
Unformatted reading for T_ICE_DATA +subroutine READ_T_ICE_DATA(tdata, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_DATA), intent(inout) :: tdata + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call read_bin_array(tdata%values, unit, iostat, iomsg) + call read_bin_array(tdata%values_old, unit, iostat, iomsg) + call read_bin_array(tdata%values_rhs, unit, iostat, iomsg) + call read_bin_array(tdata%values_div_rhs, unit, iostat, iomsg) + call read_bin_array(tdata%dvalues, unit, iostat, iomsg) + call read_bin_array(tdata%valuesl, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) tdata%ID +end subroutine READ_T_ICE_DATA +! +! +!_______________________________________________________________________________ +! Unformatted writing for T_ICE_WORK +subroutine WRITE_T_ICE_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_WORK), intent(in) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call write_bin_array(twork%fct_tmax, unit, iostat, iomsg) + call write_bin_array(twork%fct_tmin, unit, iostat, iomsg) + call write_bin_array(twork%fct_plus, unit, iostat, iomsg) + call write_bin_array(twork%fct_minus, unit, iostat, iomsg) + call write_bin_array(twork%fct_fluxes, unit, iostat, iomsg) + call write_bin_array(twork%fct_massmatrix,unit, iostat, iomsg) + call write_bin_array(twork%sigma11, unit, iostat, iomsg) + call write_bin_array(twork%sigma12, unit, iostat, iomsg) + call write_bin_array(twork%sigma22, unit, iostat, iomsg) + call write_bin_array(twork%eps11, unit, iostat, iomsg) + call write_bin_array(twork%eps12, unit, iostat, iomsg) + call write_bin_array(twork%eps22, unit, iostat, iomsg) + call write_bin_array(twork%ice_strength, unit, iostat, iomsg) + call write_bin_array(twork%inv_areamass, unit, iostat, iomsg) + call write_bin_array(twork%inv_mass, unit, iostat, iomsg) +end subroutine WRITE_T_ICE_WORK + +! 
Unformatted reading for T_ICE_WORK +subroutine READ_T_ICE_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_WORK), intent(inout) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call read_bin_array(twork%fct_tmax, unit, iostat, iomsg) + call read_bin_array(twork%fct_tmin, unit, iostat, iomsg) + call read_bin_array(twork%fct_plus, unit, iostat, iomsg) + call read_bin_array(twork%fct_minus, unit, iostat, iomsg) + call read_bin_array(twork%fct_fluxes, unit, iostat, iomsg) + call read_bin_array(twork%fct_massmatrix,unit, iostat, iomsg) + call read_bin_array(twork%sigma11, unit, iostat, iomsg) + call read_bin_array(twork%sigma12, unit, iostat, iomsg) + call read_bin_array(twork%sigma22, unit, iostat, iomsg) + call read_bin_array(twork%eps11, unit, iostat, iomsg) + call read_bin_array(twork%eps12, unit, iostat, iomsg) + call read_bin_array(twork%eps22, unit, iostat, iomsg) + call read_bin_array(twork%ice_strength, unit, iostat, iomsg) + call read_bin_array(twork%inv_areamass, unit, iostat, iomsg) + call read_bin_array(twork%inv_mass, unit, iostat, iomsg) +end subroutine READ_T_ICE_WORK +! +! +!_______________________________________________________________________________ +! Unformatted writing for T_ICE_WORK +subroutine WRITE_T_ICE_THERMO(ttherm, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_THERMO), intent(in) :: ttherm + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call write_bin_array(ttherm%t_skin, unit, iostat, iomsg) + call write_bin_array(ttherm%thdgr, unit, iostat, iomsg) + call write_bin_array(ttherm%thdgrsn, unit, iostat, iomsg) + call write_bin_array(ttherm%thdgr_old, unit, iostat, iomsg) + call write_bin_array(ttherm%ustar, unit, iostat, iomsg) +end subroutine WRITE_T_ICE_THERMO + +! 
Unformatted reading for T_ICE_WORK +subroutine READ_T_ICE_THERMO(ttherm, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_THERMO), intent(inout) :: ttherm + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call read_bin_array(ttherm%t_skin, unit, iostat, iomsg) + call read_bin_array(ttherm%thdgr, unit, iostat, iomsg) + call read_bin_array(ttherm%thdgrsn, unit, iostat, iomsg) + call read_bin_array(ttherm%thdgr_old, unit, iostat, iomsg) + call read_bin_array(ttherm%ustar, unit, iostat, iomsg) +end subroutine READ_T_ICE_THERMO +! +! +!_______________________________________________________________________________ +! Unformatted writing for T_ICE_ATMCOUPL +#if defined (__oasis) || defined (__ifsinterface) +subroutine WRITE_T_ICE_ATMCOUPL(tcoupl, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_ATMCOUPL), intent(in) :: tcoupl + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call write_bin_array(tcoupl%oce_flx_h, unit, iostat, iomsg) + call write_bin_array(tcoupl%ice_flx_h, unit, iostat, iomsg) + call write_bin_array(tcoupl%tmpoce_flx_h, unit, iostat, iomsg) + call write_bin_array(tcoupl%tmpice_flx_h, unit, iostat, iomsg) +#if defined (__oifs) || defined (__ifsinterface) + call write_bin_array(tcoupl%ice_alb, unit, iostat, iomsg) + call write_bin_array(tcoupl%enthalpyoffuse, unit, iostat, iomsg) +#endif /* (__oifs) */ +end subroutine WRITE_T_ICE_ATMCOUPL +#endif /* (__oasis) */ + +! 
Unformatted reading for T_ICE_ATMCOUPL +#if defined (__oasis) || defined (__ifsinterface) +subroutine READ_T_ICE_ATMCOUPL(tcoupl, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE_ATMCOUPL), intent(inout) :: tcoupl + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + call read_bin_array(tcoupl%oce_flx_h, unit, iostat, iomsg) + call read_bin_array(tcoupl%ice_flx_h, unit, iostat, iomsg) + call read_bin_array(tcoupl%tmpoce_flx_h, unit, iostat, iomsg) + call read_bin_array(tcoupl%tmpice_flx_h, unit, iostat, iomsg) +#if defined (__oifs) || defined (__ifsinterface) + call read_bin_array(tcoupl%ice_alb, unit, iostat, iomsg) + call read_bin_array(tcoupl%enthalpyoffuse, unit, iostat, iomsg) +#endif /* (__oifs) */ +end subroutine READ_T_ICE_ATMCOUPL +#endif /* (__oasis) */ +! +! +!_______________________________________________________________________________ +! Unformatted writing for T_ICE_ATMCOUPL +subroutine WRITE_T_ICE(ice, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE), intent(in) :: ice + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) ice%num_itracers + do i=1, ice%num_itracers + write(unit, iostat=iostat, iomsg=iomsg) ice%data(i) + end do + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) ice%thermo + write(unit, iostat=iostat, iomsg=iomsg) ice%work +#if defined (__oasis) || defined (__ifsinterface) + write(unit, iostat=iostat, iomsg=iomsg) ice%atmcoupl +#endif /* (__oasis) */ + + !___________________________________________________________________________ + write(unit, iostat=iostat, iomsg=iomsg) ice%pstar + write(unit, iostat=iostat, iomsg=iomsg) ice%ellipse + write(unit, iostat=iostat, iomsg=iomsg) ice%c_pressure + write(unit, iostat=iostat, iomsg=iomsg) 
ice%delta_min + write(unit, iostat=iostat, iomsg=iomsg) ice%Clim_evp + write(unit, iostat=iostat, iomsg=iomsg) ice%zeta_min + write(unit, iostat=iostat, iomsg=iomsg) ice%evp_rheol_steps + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_gamma_fct + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_diff + write(unit, iostat=iostat, iomsg=iomsg) ice%Tevp_inv + write(unit, iostat=iostat, iomsg=iomsg) ice%theta_io + write(unit, iostat=iostat, iomsg=iomsg) ice%alpha_evp + write(unit, iostat=iostat, iomsg=iomsg) ice%beta_evp + write(unit, iostat=iostat, iomsg=iomsg) ice%c_aevp + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_ave_steps + write(unit, iostat=iostat, iomsg=iomsg) ice%cd_oce_ice + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_free_slip + write(unit, iostat=iostat, iomsg=iomsg) ice%whichEVP + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_dt + write(unit, iostat=iostat, iomsg=iomsg) ice%Tevp_inv + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_steps_since_upd + write(unit, iostat=iostat, iomsg=iomsg) ice%ice_update + + !___________________________________________________________________________ + call write_bin_array(ice%uice , unit, iostat, iomsg) + call write_bin_array(ice%uice_rhs , unit, iostat, iomsg) + call write_bin_array(ice%uice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call write_bin_array(ice%uice_aux , unit, iostat, iomsg) + call write_bin_array(ice%vice , unit, iostat, iomsg) + call write_bin_array(ice%vice_rhs , unit, iostat, iomsg) + call write_bin_array(ice%vice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call write_bin_array(ice%vice_aux , unit, iostat, iomsg) + call write_bin_array(ice%stress_atmice_x, unit, iostat, iomsg) + call write_bin_array(ice%stress_iceoce_x, unit, iostat, iomsg) + call write_bin_array(ice%stress_atmice_y, unit, iostat, iomsg) + call write_bin_array(ice%stress_iceoce_y, unit, iostat, iomsg) + call write_bin_array(ice%srfoce_u , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_v , unit, iostat, 
iomsg) + call write_bin_array(ice%srfoce_temp , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_salt , unit, iostat, iomsg) + call write_bin_array(ice%srfoce_ssh , unit, iostat, iomsg) + call write_bin_array(ice%flx_fw , unit, iostat, iomsg) + call write_bin_array(ice%flx_h , unit, iostat, iomsg) + if (ice%whichEVP > 0) then + call write_bin_array(ice%alpha_evp_array , unit, iostat, iomsg) + call write_bin_array(ice%beta_evp_array , unit, iostat, iomsg) + end if + +end subroutine WRITE_T_ICE + +! Unformatted reading for T_ICE +subroutine READ_T_ICE(ice, unit, iostat, iomsg) + IMPLICIT NONE + class(T_ICE), intent(inout) :: ice + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) ice%num_itracers + if (.not. allocated(ice%data)) allocate(ice%data(ice%num_itracers)) + do i=1, ice%num_itracers + read(unit, iostat=iostat, iomsg=iomsg) ice%data(i) + end do + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) ice%thermo + read(unit, iostat=iostat, iomsg=iomsg) ice%work +#if defined (__oasis) || defined (__ifsinterface) + read(unit, iostat=iostat, iomsg=iomsg) ice%atmcoupl +#endif /* (__oasis) */ + + !___________________________________________________________________________ + read(unit, iostat=iostat, iomsg=iomsg) ice%pstar + read(unit, iostat=iostat, iomsg=iomsg) ice%ellipse + read(unit, iostat=iostat, iomsg=iomsg) ice%c_pressure + read(unit, iostat=iostat, iomsg=iomsg) ice%delta_min + read(unit, iostat=iostat, iomsg=iomsg) ice%Clim_evp + read(unit, iostat=iostat, iomsg=iomsg) ice%zeta_min + read(unit, iostat=iostat, iomsg=iomsg) ice%evp_rheol_steps + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_gamma_fct + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_diff + read(unit, iostat=iostat, iomsg=iomsg) ice%Tevp_inv + 
read(unit, iostat=iostat, iomsg=iomsg) ice%theta_io + read(unit, iostat=iostat, iomsg=iomsg) ice%alpha_evp + read(unit, iostat=iostat, iomsg=iomsg) ice%beta_evp + read(unit, iostat=iostat, iomsg=iomsg) ice%c_aevp + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_ave_steps + read(unit, iostat=iostat, iomsg=iomsg) ice%cd_oce_ice + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_free_slip + read(unit, iostat=iostat, iomsg=iomsg) ice%whichEVP + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_dt + read(unit, iostat=iostat, iomsg=iomsg) ice%Tevp_inv + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_steps_since_upd + read(unit, iostat=iostat, iomsg=iomsg) ice%ice_update + + !___________________________________________________________________________ + call read_bin_array(ice%uice , unit, iostat, iomsg) + call read_bin_array(ice%uice_rhs , unit, iostat, iomsg) + call read_bin_array(ice%uice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call read_bin_array(ice%uice_aux , unit, iostat, iomsg) + call read_bin_array(ice%vice , unit, iostat, iomsg) + call read_bin_array(ice%vice_rhs , unit, iostat, iomsg) + call read_bin_array(ice%vice_old , unit, iostat, iomsg) + if (ice%whichEVP /= 0) call read_bin_array(ice%vice_aux , unit, iostat, iomsg) + call read_bin_array(ice%stress_atmice_x , unit, iostat, iomsg) + call read_bin_array(ice%stress_iceoce_x , unit, iostat, iomsg) + call read_bin_array(ice%stress_atmice_y , unit, iostat, iomsg) + call read_bin_array(ice%stress_iceoce_y , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_u , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_v , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_temp , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_salt , unit, iostat, iomsg) + call read_bin_array(ice%srfoce_ssh , unit, iostat, iomsg) + call read_bin_array(ice%flx_fw , unit, iostat, iomsg) + call read_bin_array(ice%flx_h , unit, iostat, iomsg) + if (ice%whichEVP > 0) then + call read_bin_array(ice%alpha_evp_array , unit, 
iostat, iomsg) + call read_bin_array(ice%beta_evp_array , unit, iostat, iomsg) + end if + +end subroutine READ_T_ICE +END MODULE MOD_ICE +! +! +!_______________________________________________________________________________ +! interface to initialise derived type for sea ice +module ice_init_interface + interface + subroutine ice_init(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARSUP + USE MOD_PARTIT + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + end interface +end module +! +! +!_______________________________________________________________________________ +! initialise derived type for sea ice +subroutine ice_init(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE o_param, only: WP + IMPLICIT NONE + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: elem_size, node_size, n, ed(2) + integer, save :: nm_unit = 105 ! unit to open namelist file, skip 100-102 for cray + integer :: iost + !___________________________________________________________________________ + ! 
define ice namelist parameter + integer :: whichEVP, evp_rheol_steps, ice_ave_steps + real(kind=WP) :: Pstar, ellipse, c_pressure, delta_min, ice_gamma_fct, & + ice_diff, theta_io, alpha_evp, beta_evp, c_aevp, Cd_oce_ice + namelist /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, & + Cd_oce_ice, ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, & + alpha_evp, beta_evp, c_aevp + + real(kind=WP) :: Sice, h0, emiss_ice, emiss_wat, albsn, albsnm, albi, & + albim, albw, con, consn + namelist /ice_therm/ Sice, h0, emiss_ice, emiss_wat, albsn, albsnm, albi, & + albim, albw, con, consn + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !___________________________________________________________________________ + ! open and read namelist.ice for I/O + open(unit=nm_unit, file='namelist.ice', form='formatted', access='sequential', status='old', iostat=iost ) + if (iost == 0) then + if (mype==0) write(*,*) ' file : ', 'namelist.ice',' open ok' + else + if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.ice',' ; iostat=',iost + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + read(nm_unit, nml=ice_dyn , iostat=iost) + read(nm_unit, nml=ice_therm, iostat=iost) + close(nm_unit) + + !___________________________________________________________________________ + ! 
set parameters in ice derived type from namelist.ice --> namelist /ice_dyn/ + ice%whichEVP = whichEVP + ice%pstar = Pstar + ice%ellipse = ellipse + ice%c_pressure = c_pressure + ice%delta_min = delta_min + ice%evp_rheol_steps = evp_rheol_steps + ice%cd_oce_ice = Cd_oce_ice + ice%ice_gamma_fct = ice_gamma_fct + ice%ice_diff = ice_diff + ice%theta_io = theta_io + ice%ice_ave_steps = ice_ave_steps + ice%alpha_evp = alpha_evp + ice%beta_evp = beta_evp + ice%c_aevp = c_aevp + + ! set parameters in ice derived type from namelist.ice --> namelist /ice_therm/ + ice%thermo%con = con + ice%thermo%consn = consn + ice%thermo%Sice = Sice + ice%thermo%h0 = h0 + ice%thermo%emiss_ice= emiss_ice + ice%thermo%emiss_wat= emiss_wat + ice%thermo%albsn = albsn + ice%thermo%albsnm = albsnm + ice%thermo%albi = albi + ice%thermo%albim = albim + ice%thermo%albw = albw + + ice%thermo%cc=ice%thermo%rhowat*4190.0 ! Volumetr. heat cap. of water [J/m**3/K](cc = rhowat*cp_water) + ice%thermo%cl=ice%thermo%rhoice*3.34e5 ! Volumetr. latent heat of ice fusion [J/m**3](cl=rhoice*Lf) + + !___________________________________________________________________________ + ! define local vertice & elem array size + elem_size=myDim_elem2D+eDim_elem2D + node_size=myDim_nod2D +eDim_nod2D + + !___________________________________________________________________________ + ! allocate/initialise arrays in ice derived type + ! 
initialise velocity and stress related arrays in ice derived type + allocate(ice%uice( node_size)) + allocate(ice%uice_rhs( node_size)) + allocate(ice%uice_old( node_size)) + allocate(ice%vice( node_size)) + allocate(ice%vice_rhs( node_size)) + allocate(ice%vice_old( node_size)) + allocate(ice%stress_atmice_x( node_size)) + allocate(ice%stress_iceoce_x( node_size)) + allocate(ice%stress_atmice_y( node_size)) + allocate(ice%stress_iceoce_y( node_size)) + ice%uice = 0.0_WP + ice%uice_rhs = 0.0_WP + ice%uice_old = 0.0_WP + ice%stress_atmice_x = 0.0_WP + ice%stress_iceoce_x = 0.0_WP + ice%vice = 0.0_WP + ice%vice_rhs = 0.0_WP + ice%vice_old = 0.0_WP + ice%stress_atmice_y = 0.0_WP + ice%stress_iceoce_y = 0.0_WP + if (ice%whichEVP /= 0) then + allocate(ice%uice_aux( node_size)) + allocate(ice%vice_aux( node_size)) + ice%uice_aux = 0.0_WP + ice%vice_aux = 0.0_WP + end if + if (ice%whichEVP == 2) then + allocate(ice%alpha_evp_array( node_size)) + allocate(ice%beta_evp_array( node_size)) + ice%alpha_evp_array = ice%alpha_evp + ice%beta_evp_array = ice%alpha_evp + end if + + !___________________________________________________________________________ + ! initialise surface ocean arrays in ice derived type + allocate(ice%srfoce_u( node_size)) + allocate(ice%srfoce_v( node_size)) + allocate(ice%srfoce_temp( node_size)) + allocate(ice%srfoce_salt( node_size)) + allocate(ice%srfoce_ssh( node_size)) + ice%srfoce_u = 0.0_WP + ice%srfoce_v = 0.0_WP + ice%srfoce_temp = 0.0_WP + ice%srfoce_salt = 0.0_WP + ice%srfoce_ssh = 0.0_WP + + allocate(ice%flx_fw(node_size)) + allocate(ice%flx_h( node_size)) + ice%flx_fw = 0.0_WP + ice%flx_h = 0.0_WP + + !___________________________________________________________________________ + ! initialse data array of ice derived type containing "ice tracer" that have + ! to be advected: a_ice (index=1), m_ice (index=2), m_snow (index=3), + ! 
ice_temp (index=4, only when coupled) + allocate(ice%data(ice%num_itracers)) + do n = 1, ice%num_itracers + allocate(ice%data(n)%values( node_size)) + allocate(ice%data(n)%values_old(node_size)) + allocate(ice%data(n)%values_rhs(node_size)) + allocate(ice%data(n)%values_div_rhs(node_size)) + allocate(ice%data(n)%dvalues( node_size)) + allocate(ice%data(n)%valuesl( node_size)) + ice%data(n)%ID = n + ice%data(n)%values = 0.0_WP + ice%data(n)%values_old = 0.0_WP + ice%data(n)%values_rhs = 0.0_WP + ice%data(n)%values_div_rhs = 0.0_WP + ice%data(n)%dvalues = 0.0_WP + ice%data(n)%valuesl = 0.0_WP + if (n==4) ice%data(n)%values = 265.15_WP + end do + + !___________________________________________________________________________ + ! initialse work array of ice derived type + allocate(ice%work%fct_tmax( node_size)) + allocate(ice%work%fct_tmin( node_size)) + allocate(ice%work%fct_plus( node_size)) + allocate(ice%work%fct_minus( node_size)) + allocate(ice%work%fct_fluxes( elem_size, 3)) + ice%work%fct_tmax = 0.0_WP + ice%work%fct_tmin = 0.0_WP + ice%work%fct_plus = 0.0_WP + ice%work%fct_minus = 0.0_WP + ice%work%fct_fluxes = 0.0_WP + + allocate(ice%work%fct_massmatrix(sum(nn_num(1:myDim_nod2D)))) + ice%work%fct_massmatrix = 0.0_WP + + allocate(ice%work%sigma11( elem_size)) + allocate(ice%work%sigma12( elem_size)) + allocate(ice%work%sigma22( elem_size)) + allocate(ice%work%eps11( elem_size)) + allocate(ice%work%eps12( elem_size)) + allocate(ice%work%eps22( elem_size)) + ice%work%sigma11 = 0.0_WP + ice%work%sigma12 = 0.0_WP + ice%work%sigma22 = 0.0_WP + ice%work%eps11 = 0.0_WP + ice%work%eps12 = 0.0_WP + ice%work%eps22 = 0.0_WP + + allocate(ice%work%ice_strength( elem_size)) + allocate(ice%work%inv_areamass( node_size)) + allocate(ice%work%inv_mass( node_size)) + ice%work%ice_strength= 0.0_WP + ice%work%inv_areamass= 0.0_WP + ice%work%inv_mass = 0.0_WP + + !___________________________________________________________________________ + ! 
initialse thermo array of ice derived type + allocate(ice%thermo%ustar( node_size)) + allocate(ice%thermo%t_skin( node_size)) + allocate(ice%thermo%thdgr( node_size)) + allocate(ice%thermo%thdgrsn( node_size)) + allocate(ice%thermo%thdgr_old( node_size)) + ice%thermo%ustar = 0.0_WP + ice%thermo%t_skin = 0.0_WP + ice%thermo%thdgr = 0.0_WP + ice%thermo%thdgrsn = 0.0_WP + ice%thermo%thdgr_old = 0.0_WP + + !___________________________________________________________________________ + ! initialse coupling array of ice derived type +#if defined (__oasis) || defined (__ifsinterface) + allocate(ice%atmcoupl%oce_flx_h( node_size)) + allocate(ice%atmcoupl%ice_flx_h( node_size)) + allocate(ice%atmcoupl%tmpoce_flx_h( node_size)) + allocate(ice%atmcoupl%tmpice_flx_h( node_size)) + ice%atmcoupl%oce_flx_h = 0.0_WP + ice%atmcoupl%ice_flx_h = 0.0_WP + ice%atmcoupl%tmpoce_flx_h = 0.0_WP + ice%atmcoupl%tmpice_flx_h = 0.0_WP +#if defined (__oifs) || defined (__ifsinterface) + allocate(ice%atmcoupl%ice_alb( node_size)) + allocate(ice%atmcoupl%enthalpyoffuse(node_size)) + ice%atmcoupl%ice_alb = 0.6_WP + ice%atmcoupl%enthalpyoffuse= 0.0_WP +#endif /* (__oifs) */ +#endif /* (__oasis) */ + + !___________________________________________________________________________ + ! --> took from oce_mesh.F90 --> subroutine mesh_auxiliary_arrays(partit, mesh) + ! to here since namelist.ice is now read in ice_init where whichEVP is not available + ! when mesh_auxiliary_arrays is called + !array of 2D boundary conditions is used in ice_maEVP + if (ice%whichEVP > 0) then + allocate(mesh%bc_index_nod2D(myDim_nod2D+eDim_nod2D)) + mesh%bc_index_nod2D=1._WP + do n=1, myDim_edge2D + ed=mesh%edges(:, n) + if (myList_edge2D(n) <= mesh%edge2D_in) cycle + mesh%bc_index_nod2D(ed)=0._WP + end do + end if + +end subroutine ice_init +! +! +! +! +! +!_______________________________________________________________________________ +! 
initialise derived type for sea ice +subroutine ice_init_toyocean_dummy(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE o_param, only: WP + IMPLICIT NONE + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: node_size, n + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !___________________________________________________________________________ + ! define local vertice & elem array size + node_size=myDim_nod2D+eDim_nod2D + + !___________________________________________________________________________ + ! allocate/initialise arrays in ice derived type + ! initialise velocity and stress related arrays in ice derived type + allocate(ice%uice( node_size)) + allocate(ice%vice( node_size)) + ice%uice = 0.0_WP + ice%vice = 0.0_WP + allocate(ice%data(ice%num_itracers)) + do n = 1, ice%num_itracers + allocate(ice%data(n)%values( node_size)) + ice%data(n)%ID = n + ice%data(n)%values = 0.0_WP + end do +end subroutine ice_init_toyocean_dummy diff --git a/src/MOD_MESH.F90 b/src/MOD_MESH.F90 index 53f45e1bd..dd3c40c72 100644 --- a/src/MOD_MESH.F90 +++ b/src/MOD_MESH.F90 @@ -1,6 +1,8 @@ !========================================================== MODULE MOD_MESH USE O_PARAM +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS USE, intrinsic :: ISO_FORTRAN_ENV IMPLICIT NONE SAVE @@ -9,16 +11,17 @@ MODULE MOD_MESH TYPE SPARSE_MATRIX integer :: nza integer :: dim - real(kind=WP), allocatable, dimension(:) :: values + real(kind=WP), allocatable, dimension(:) :: values integer(int32), allocatable, dimension(:) :: colind integer(int32), allocatable, dimension(:) :: 
rowptr integer(int32), allocatable, dimension(:) :: colind_loc integer(int32), allocatable, dimension(:) :: rowptr_loc + real(kind=WP), allocatable, dimension(:) :: pr_values !preconditioner values END TYPE SPARSE_MATRIX TYPE T_MESH integer :: nod2D ! the number of 2D nodes -real(kind=WP) :: ocean_area +real(kind=WP) :: ocean_area, ocean_areawithcav real(kind=WP), allocatable, dimension(:,:) :: coord_nod2D, geo_coord_nod2D integer :: edge2D ! the number of 2D edges integer :: edge2D_in ! the number of internal 2D edges @@ -63,7 +66,7 @@ MODULE MOD_MESH ! ! !___horizontal mesh info________________________________________________________ -real(kind=WP), allocatable, dimension(:,:) :: area, area_inv +real(kind=WP), allocatable, dimension(:,:) :: area, area_inv, areasvol, areasvol_inv real(kind=WP), allocatable, dimension(:) :: mesh_resolution ! @@ -81,6 +84,11 @@ MODULE MOD_MESH real(kind=WP), allocatable, dimension(:,:) :: cavity_nrst_cavlpnt_xyz +! +! +!___coriolis force______________________________________________________________ +real(kind=WP), allocatable, dimension(:) :: coriolis_node, coriolis + ! ! !___Elevation stiffness matrix__________________________________________________ @@ -89,10 +97,248 @@ MODULE MOD_MESH !#if defined (__oasis) real(kind=WP), allocatable, dimension(:) :: lump2d_south, lump2d_north integer, allocatable, dimension(:) :: ind_south, ind_north -!#endif +!#endif + +integer :: nn_size +integer, allocatable, dimension(:) :: nn_num +integer, allocatable, dimension(:,:) :: nn_pos + +!_______________________________________________________________________________ +! Arrays added for ALE implementation: +! --> layer thinkness at node and depthlayer for t=n and t=n+1 +real(kind=WP), allocatable,dimension(:,:) :: hnode, hnode_new, zbar_3d_n, Z_3d_n + +! --> layer thinkness at elements, interpolated from hnode +real(kind=WP), allocatable,dimension(:,:) :: helem + +! 
--> thinkness of bottom elem (important for partial cells) +real(kind=WP), allocatable,dimension(:) :: bottom_elem_thickness +real(kind=WP), allocatable,dimension(:) :: bottom_node_thickness + +! --> The increment of total fluid depth on elements. It is used to update the matrix +real(kind=WP), allocatable,dimension(:) :: dhe - character(:), allocatable :: representative_checksum +! --> hbar, hbar_old: correspond to the elevation, but on semi-integer time steps. +real(kind=WP), allocatable,dimension(:) :: hbar, hbar_old + +! --> auxiliary array to store depth of layers and depth of mid level due to changing +! layer thinkness at every node +!real(kind=WP), allocatable,dimension(:) :: zbar_n, Z_n + +! new bottom depth at node and element due to partial cells +real(kind=WP), allocatable,dimension(:) :: zbar_n_bot +real(kind=WP), allocatable,dimension(:) :: zbar_e_bot + +! new depth of cavity-ocean interface at node and element due to partial cells +real(kind=WP), allocatable,dimension(:) :: zbar_n_srf +real(kind=WP), allocatable,dimension(:) :: zbar_e_srf + +character(:), allocatable :: representative_checksum + +contains +#if defined(__PGI) + private +#endif + procedure write_t_mesh + procedure read_t_mesh + generic :: write(unformatted) => write_t_mesh + generic :: read(unformatted) => read_t_mesh END TYPE T_MESH + +contains + +! Unformatted writing for t_mesh +subroutine write_t_mesh(mesh, unit, iostat, iomsg) + IMPLICIT NONE + class(t_mesh), intent(in) :: mesh + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i, j, k + integer :: s1, s2, s3 + ! 
write records (giving sizes for the allocation for arrays) + write(unit, iostat=iostat, iomsg=iomsg) mesh%nod2D + write(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_area + write(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_areawithcav + write(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D + write(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D_in + write(unit, iostat=iostat, iomsg=iomsg) mesh%elem2D + call write_bin_array(mesh%elem2D_nodes, unit, iostat, iomsg) + call write_bin_array(mesh%edges, unit, iostat, iomsg) + call write_bin_array(mesh%edge_tri, unit, iostat, iomsg) + call write_bin_array(mesh%elem_edges, unit, iostat, iomsg) + call write_bin_array(mesh%elem_area, unit, iostat, iomsg) + call write_bin_array(mesh%edge_dxdy, unit, iostat, iomsg) + + call write_bin_array(mesh%edge_cross_dxdy, unit, iostat, iomsg) + call write_bin_array(mesh%elem_cos, unit, iostat, iomsg) + call write_bin_array(mesh%metric_factor, unit, iostat, iomsg) + call write_bin_array(mesh%elem_neighbors, unit, iostat, iomsg) + call write_bin_array(mesh%nod_in_elem2D, unit, iostat, iomsg) + call write_bin_array(mesh%x_corners, unit, iostat, iomsg) + call write_bin_array(mesh%y_corners, unit, iostat, iomsg) + call write_bin_array(mesh%nod_in_elem2D_num, unit, iostat, iomsg) + call write_bin_array(mesh%depth, unit, iostat, iomsg) + call write_bin_array(mesh%gradient_vec, unit, iostat, iomsg) + call write_bin_array(mesh%gradient_sca, unit, iostat, iomsg) + call write_bin_array(mesh%bc_index_nod2D, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) mesh%nl + + call write_bin_array(mesh%zbar, unit, iostat, iomsg) + call write_bin_array(mesh%Z, unit, iostat, iomsg) + call write_bin_array(mesh%elem_depth, unit, iostat, iomsg) + call write_bin_array(mesh%ulevels, unit, iostat, iomsg) + call write_bin_array(mesh%ulevels_nod2D, unit, iostat, iomsg) + call write_bin_array(mesh%ulevels_nod2D_max, unit, iostat, iomsg) + call write_bin_array(mesh%nlevels, unit, iostat, iomsg) + call 
write_bin_array(mesh%nlevels_nod2D, unit, iostat, iomsg) + call write_bin_array(mesh%nlevels_nod2D_min, unit, iostat, iomsg) + call write_bin_array(mesh%area, unit, iostat, iomsg) + call write_bin_array(mesh%area_inv, unit, iostat, iomsg) + call write_bin_array(mesh%areasvol, unit, iostat, iomsg) + call write_bin_array(mesh%areasvol_inv, unit, iostat, iomsg) + call write_bin_array(mesh%mesh_resolution, unit, iostat, iomsg) + + call write_bin_array(mesh%cavity_flag_n, unit, iostat, iomsg) + call write_bin_array(mesh%cavity_flag_e, unit, iostat, iomsg) + call write_bin_array(mesh%cavity_depth, unit, iostat, iomsg) + call write_bin_array(mesh%cavity_nrst_cavlpnt_xyz, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%dim + write(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%nza + + call write_bin_array(mesh%ssh_stiff%rowptr, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%colind, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%values, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%colind_loc, unit, iostat, iomsg) + call write_bin_array(mesh%ssh_stiff%rowptr_loc, unit, iostat, iomsg) + + call write_bin_array(mesh%lump2d_south, unit, iostat, iomsg) + call write_bin_array(mesh%lump2d_north, unit, iostat, iomsg) + call write_bin_array(mesh%ind_south, unit, iostat, iomsg) + call write_bin_array(mesh%ind_north, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) mesh%nn_size + call write_bin_array(mesh%nn_num, unit, iostat, iomsg) + call write_bin_array(mesh%nn_pos, unit, iostat, iomsg) + call write_bin_array(mesh%hnode, unit, iostat, iomsg) + call write_bin_array(mesh%hnode_new, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_3d_n, unit, iostat, iomsg) + call write_bin_array(mesh%Z_3d_n, unit, iostat, iomsg) + call write_bin_array(mesh%helem, unit, iostat, iomsg) + call write_bin_array(mesh%bottom_elem_thickness, unit, iostat, iomsg) + call write_bin_array(mesh%bottom_node_thickness, unit, 
iostat, iomsg) + call write_bin_array(mesh%dhe, unit, iostat, iomsg) + call write_bin_array(mesh%hbar, unit, iostat, iomsg) + call write_bin_array(mesh%hbar_old, unit, iostat, iomsg) +! call write_bin_array(mesh%zbar_n, unit, iostat, iomsg) +! call write_bin_array(mesh%Z_n, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_n_bot, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_e_bot, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) + call write_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg) +! call write_bin_array(mesh%representative_checksum, unit, iostat, iomsg) + call write_bin_array(mesh%coriolis, unit, iostat, iomsg) + call write_bin_array(mesh%coriolis_node, unit, iostat, iomsg) + +end subroutine write_t_mesh + +! Unformatted reading for t_mesh +subroutine read_t_mesh(mesh, unit, iostat, iomsg) + IMPLICIT NONE + class(t_mesh), intent(inout) :: mesh + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i, j, k + integer :: s1, s2, s3 + ! 
write records (giving sizes for the allocation for arrays) + read(unit, iostat=iostat, iomsg=iomsg) mesh%nod2D + read(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_area + read(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_areawithcav + read(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D + read(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D_in + read(unit, iostat=iostat, iomsg=iomsg) mesh%elem2D + + call read_bin_array(mesh%elem2D_nodes, unit, iostat, iomsg) + call read_bin_array(mesh%edges, unit, iostat, iomsg) + call read_bin_array(mesh%edge_tri, unit, iostat, iomsg) + call read_bin_array(mesh%elem_edges, unit, iostat, iomsg) + call read_bin_array(mesh%elem_area, unit, iostat, iomsg) + call read_bin_array(mesh%edge_dxdy, unit, iostat, iomsg) + + call read_bin_array(mesh%edge_cross_dxdy, unit, iostat, iomsg) + call read_bin_array(mesh%elem_cos, unit, iostat, iomsg) + call read_bin_array(mesh%metric_factor, unit, iostat, iomsg) + call read_bin_array(mesh%elem_neighbors, unit, iostat, iomsg) + call read_bin_array(mesh%nod_in_elem2D, unit, iostat, iomsg) + call read_bin_array(mesh%x_corners, unit, iostat, iomsg) + call read_bin_array(mesh%y_corners, unit, iostat, iomsg) + call read_bin_array(mesh%nod_in_elem2D_num, unit, iostat, iomsg) + call read_bin_array(mesh%depth, unit, iostat, iomsg) + call read_bin_array(mesh%gradient_vec, unit, iostat, iomsg) + call read_bin_array(mesh%gradient_sca, unit, iostat, iomsg) + call read_bin_array(mesh%bc_index_nod2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) mesh%nl + + call read_bin_array(mesh%zbar, unit, iostat, iomsg) + call read_bin_array(mesh%Z, unit, iostat, iomsg) + call read_bin_array(mesh%elem_depth, unit, iostat, iomsg) + call read_bin_array(mesh%ulevels, unit, iostat, iomsg) + call read_bin_array(mesh%ulevels_nod2D, unit, iostat, iomsg) + call read_bin_array(mesh%ulevels_nod2D_max, unit, iostat, iomsg) + call read_bin_array(mesh%nlevels, unit, iostat, iomsg) + call read_bin_array(mesh%nlevels_nod2D, 
unit, iostat, iomsg) + call read_bin_array(mesh%nlevels_nod2D_min, unit, iostat, iomsg) + call read_bin_array(mesh%area, unit, iostat, iomsg) + call read_bin_array(mesh%area_inv, unit, iostat, iomsg) + call read_bin_array(mesh%areasvol, unit, iostat, iomsg) + call read_bin_array(mesh%areasvol_inv, unit, iostat, iomsg) + call read_bin_array(mesh%mesh_resolution, unit, iostat, iomsg) + + call read_bin_array(mesh%cavity_flag_n, unit, iostat, iomsg) + call read_bin_array(mesh%cavity_flag_e, unit, iostat, iomsg) + call read_bin_array(mesh%cavity_depth, unit, iostat, iomsg) + call read_bin_array(mesh%cavity_nrst_cavlpnt_xyz, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%dim + read(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%nza + + call read_bin_array(mesh%ssh_stiff%rowptr, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%colind, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%values, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%colind_loc, unit, iostat, iomsg) + call read_bin_array(mesh%ssh_stiff%rowptr_loc, unit, iostat, iomsg) + + call read_bin_array(mesh%lump2d_south, unit, iostat, iomsg) + call read_bin_array(mesh%lump2d_north, unit, iostat, iomsg) + call read_bin_array(mesh%ind_south, unit, iostat, iomsg) + call read_bin_array(mesh%ind_north, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) mesh%nn_size + call read_bin_array(mesh%nn_num, unit, iostat, iomsg) + call read_bin_array(mesh%nn_pos, unit, iostat, iomsg) + call read_bin_array(mesh%hnode, unit, iostat, iomsg) + call read_bin_array(mesh%hnode_new, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_3d_n, unit, iostat, iomsg) + call read_bin_array(mesh%Z_3d_n, unit, iostat, iomsg) + call read_bin_array(mesh%helem, unit, iostat, iomsg) + call read_bin_array(mesh%bottom_elem_thickness, unit, iostat, iomsg) + call read_bin_array(mesh%bottom_node_thickness, unit, iostat, iomsg) + call read_bin_array(mesh%dhe, unit, iostat, iomsg) 
+ call read_bin_array(mesh%hbar, unit, iostat, iomsg) + call read_bin_array(mesh%hbar_old, unit, iostat, iomsg) +! call read_bin_array(mesh%zbar_n, unit, iostat, iomsg) +! call read_bin_array(mesh%Z_n, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n_bot, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_e_bot, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg) +! call read_bin_array(mesh%representative_checksum, unit, iostat, iomsg) + call read_bin_array(mesh%coriolis, unit, iostat, iomsg) + call read_bin_array(mesh%coriolis_node, unit, iostat, iomsg) + +end subroutine read_t_mesh end module MOD_MESH !========================================================== diff --git a/src/MOD_PARTIT.F90 b/src/MOD_PARTIT.F90 new file mode 100644 index 000000000..c51ae2221 --- /dev/null +++ b/src/MOD_PARTIT.F90 @@ -0,0 +1,202 @@ +!========================================================== +! Variables to organize parallel work +module MOD_PARTIT +USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +#if defined(_OPENMP) + USE OMP_LIB +#endif +IMPLICIT NONE +SAVE +include 'mpif.h' +integer, parameter :: MAX_LAENDERECK=16 +integer, parameter :: MAX_NEIGHBOR_PARTITIONS=32 + + +type com_struct + integer :: rPEnum ! the number of PE I receive info from + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: rPE ! their list + integer, dimension(MAX_NEIGHBOR_PARTITIONS+1) :: rptr ! allocatables to the list of nodes + integer, dimension(:), allocatable :: rlist ! the list of nodes + integer :: sPEnum ! send part + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sPE + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sptr + integer, dimension(:), allocatable :: slist + integer, dimension(:), allocatable :: req ! request for MPI_Wait + integer :: nreq ! number of requests for MPI_Wait + ! 
(to combine halo exchange of several fields) + contains +#if defined(__PGI) + private +#endif + procedure WRITE_T_COM_STRUCT + procedure READ_T_COM_STRUCT + generic :: write(unformatted) => WRITE_T_COM_STRUCT + generic :: read(unformatted) => READ_T_COM_STRUCT +end type com_struct + +TYPE T_PARTIT + integer :: MPI_COMM_FESOM ! FESOM communicator (for ocean only runs if often a copy of MPI_COMM_WORLD) + + type(com_struct) :: com_nod2D + type(com_struct) :: com_elem2D + type(com_struct) :: com_elem2D_full + + ! MPI Datatypes for interface exchange + ! Element fields (2D; 2D integer; 3D with nl-1 or nl levels, 1 - 4 values) + ! small halo and / or full halo + !!! s(r)_mpitype_* are constructed during the runtime ans shall not be dumped!!! + integer, allocatable :: s_mpitype_elem2D(:,:), r_mpitype_elem2D(:,:) + integer, allocatable :: s_mpitype_elem2D_full_i(:), r_mpitype_elem2D_full_i(:) + integer, allocatable :: s_mpitype_elem2D_full(:,:), r_mpitype_elem2D_full(:,:) + integer, allocatable :: s_mpitype_elem3D(:,:,:), r_mpitype_elem3D(:,:,:) + integer, allocatable :: s_mpitype_elem3D_full(:,:,:),r_mpitype_elem3D_full(:,:,:) + + ! Nodal fields (2D; 2D integer; 3D with nl-1 or nl levels, one, two, or three values) + integer, allocatable :: s_mpitype_nod2D(:), r_mpitype_nod2D(:) + integer, allocatable :: s_mpitype_nod2D_i(:), r_mpitype_nod2D_i(:) + integer, allocatable :: s_mpitype_nod3D(:,:,:), r_mpitype_nod3D(:,:,:) + + ! general MPI part + integer :: MPIERR + integer :: npes + integer :: mype + integer :: maxPEnum=100 + integer, allocatable, dimension(:) :: part + + ! Mesh partition + integer :: myDim_nod2D, eDim_nod2D + integer, allocatable, dimension(:) :: myList_nod2D + integer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D + integer, allocatable, dimension(:) :: myList_elem2D + integer :: myDim_edge2D, eDim_edge2D + integer, allocatable, dimension(:) :: myList_edge2D + + integer :: pe_status = 0 ! if /=0 then something is wrong + !!! 
remPtr_* are constructed during the runtime and shall not be dumped!!! + integer, allocatable :: remPtr_nod2D(:), remList_nod2D(:) + integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) + + logical :: elem_full_flag +#if defined(_OPENMP) + !!! plock is constructed during the runtime and shall not be dumped!!! + integer(omp_lock_kind), allocatable :: plock(:) +#endif + contains +#if defined(__PGI) + private +#endif + procedure WRITE_T_PARTIT + procedure READ_T_PARTIT + generic :: write(unformatted) => WRITE_T_PARTIT + generic :: read(unformatted) => READ_T_PARTIT +END TYPE T_PARTIT +contains + +! Unformatted writing for COM_STRUCT TYPE +subroutine WRITE_T_COM_STRUCT(tstruct, unit, iostat, iomsg) + IMPLICIT NONE + class(COM_STRUCT), intent(in) :: tstruct + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + write(unit, iostat=iostat, iomsg=iomsg) tstruct%rPEnum + call write1d_int_static(tstruct%rPE, unit, iostat, iomsg) + call write1d_int_static(tstruct%rptr, unit, iostat, iomsg) + call write_bin_array(tstruct%rlist, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tstruct%sPEnum + call write1d_int_static(tstruct%sPE, unit, iostat, iomsg) + call write1d_int_static(tstruct%sptr, unit, iostat, iomsg) + call write_bin_array(tstruct%slist, unit, iostat, iomsg) + ! req is constructed during the runtime + ! 
call write_bin_array(tstruct%req, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tstruct%nreq +end subroutine WRITE_T_COM_STRUCT + +! Unformatted reading for COM_STRUCT TYPE. +! NOTE: the field order here must mirror WRITE_T_COM_STRUCT exactly — this is a raw +! unformatted stream, so any reordering silently corrupts every later field. +subroutine READ_T_COM_STRUCT(tstruct, unit, iostat, iomsg) + IMPLICIT NONE + class(COM_STRUCT), intent(inout) :: tstruct + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + read(unit, iostat=iostat, iomsg=iomsg) tstruct%rPEnum + call read1d_int_static(tstruct%rPE, unit, iostat, iomsg) + call read1d_int_static(tstruct%rptr, unit, iostat, iomsg) + call read_bin_array(tstruct%rlist, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) tstruct%sPEnum + call read1d_int_static(tstruct%sPE, unit, iostat, iomsg) + call read1d_int_static(tstruct%sptr, unit, iostat, iomsg) + call read_bin_array(tstruct%slist, unit, iostat, iomsg) +! req is constructed during the runtime +! call read_bin_array(tstruct%req, unit, iostat, iomsg) +! nreq is still dumped/restored even though req itself is rebuilt at runtime + read(unit, iostat=iostat, iomsg=iomsg) tstruct%nreq +end subroutine READ_T_COM_STRUCT + +! 
Unformatted writing for T_PARTIT +subroutine WRITE_T_PARTIT(partit, unit, iostat, iomsg) + IMPLICIT NONE + class(T_PARTIT), intent(in) :: partit + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + write(unit, iostat=iostat, iomsg=iomsg) partit%com_nod2D + write(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D + write(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D_full + + write(unit, iostat=iostat, iomsg=iomsg) partit%npes + write(unit, iostat=iostat, iomsg=iomsg) partit%mype + write(unit, iostat=iostat, iomsg=iomsg) partit%maxPEnum + call write_bin_array(partit%part, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_nod2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_nod2D + call write_bin_array(partit%myList_nod2D, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_elem2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_elem2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eXDim_elem2D + call write_bin_array(partit%myList_elem2D, unit, iostat, iomsg) + + write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_edge2D + write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_edge2D + call write_bin_array(partit%myList_edge2D, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) partit%pe_status +end subroutine WRITE_T_PARTIT +! 
Unformatted reading for T_PARTIT +subroutine READ_T_PARTIT(partit, unit, iostat, iomsg) + IMPLICIT NONE + class(T_PARTIT), intent(inout) :: partit + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + read(unit, iostat=iostat, iomsg=iomsg) partit%com_nod2D + read(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D_full + + read(unit, iostat=iostat, iomsg=iomsg) partit%npes + read(unit, iostat=iostat, iomsg=iomsg) partit%mype + read(unit, iostat=iostat, iomsg=iomsg) partit%maxPEnum + call read_bin_array(partit%part, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_nod2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_nod2D + call read_bin_array(partit%myList_nod2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eXDim_elem2D + call read_bin_array(partit%myList_elem2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_edge2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_edge2D + call read_bin_array(partit%myList_edge2D, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) partit%pe_status +end subroutine READ_T_PARTIT + +end module MOD_PARTIT diff --git a/src/MOD_READ_BINARY_ARRAYS.F90 b/src/MOD_READ_BINARY_ARRAYS.F90 new file mode 100644 index 000000000..84b883c43 --- /dev/null +++ b/src/MOD_READ_BINARY_ARRAYS.F90 @@ -0,0 +1,118 @@ +!========================================================== +! +!------------------------------------------------------------------------------------------ +! 
useful interface (read_bin_array) for reading arbitary binary arrays into an opened file +MODULE MOD_READ_BINARY_ARRAYS +use o_PARAM +private +public :: read_bin_array, read1d_int_static +INTERFACE read_bin_array + MODULE PROCEDURE read1d_real, read1d_int, read1d_char, read2d_real, read2d_int, read3d_real, read3d_int +END INTERFACE +contains +subroutine read1d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(inout), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + if (.not. allocated(arr)) allocate(arr(s1)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_real + +subroutine read1d_int(arr, unit, iostat, iomsg) + integer, intent(inout), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + if (.not. allocated(arr)) allocate(arr(s1)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_int + +subroutine read1d_char(arr, unit, iostat, iomsg) + character, intent(inout), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + if (.not. 
allocated(arr)) allocate(arr(s1)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_char + +subroutine read1d_int_static(arr, unit, iostat, iomsg) + IMPLICIT NONE + integer, intent(inout) :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + read(unit, iostat=iostat, iomsg=iomsg) s1 + if (s1==0) return + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine read1d_int_static + +subroutine read2d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(inout), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2 + if ((s1==0) .or. (s2==0)) return + if (.not. allocated(arr)) allocate(arr(s1, s2)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) +end subroutine read2d_real + +subroutine read2d_int(arr, unit, iostat, iomsg) + integer, intent(inout), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2 + if ((s1==0) .or. (s2==0)) return + if (.not. allocated(arr)) allocate(arr(s1, s2)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) +end subroutine read2d_int + +subroutine read3d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(inout), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + if ((s1==0) .or. (s2==0) .or. (s3==0)) return + if (.not. 
allocated(arr)) allocate(arr(s1,s2,s3)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) +end subroutine read3d_real + +subroutine read3d_int(arr, unit, iostat, iomsg) + integer, intent(inout), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + read(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + if ((s1==0) .or. (s2==0) .or. (s3==0)) return + if (.not. allocated(arr)) allocate(arr(s1,s2,s3)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) +end subroutine read3d_int +end module MOD_READ_BINARY_ARRAYS +!========================================================== + diff --git a/src/MOD_TRACER.F90 b/src/MOD_TRACER.F90 new file mode 100644 index 000000000..6f1912d54 --- /dev/null +++ b/src/MOD_TRACER.F90 @@ -0,0 +1,239 @@ +!========================================================== +MODULE MOD_TRACER +USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE + +TYPE T_TRACER_DATA +real(kind=WP), allocatable, dimension(:,:) :: values, valuesAB ! instant values & Adams-Bashfort interpolation +logical :: smooth_bh_tra=.false. +real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra +logical :: i_vert_diff =.false. +character(20) :: tra_adv_hor, tra_adv_ver, tra_adv_lim ! type of the advection scheme for this tracer +real(kind=WP) :: tra_adv_ph = 1. ! a parameter to be used in horizontal advection (for MUSCL it is the fraction of fourth-order contribution in the solution) +real(kind=WP) :: tra_adv_pv = 1. ! 
a parameter to be used in horizontal advection (for QR4C it is the fraction of fourth-order contribution in the solution) +integer :: ID + +contains +#if defined(__PGI) +private +#endif +procedure WRITE_T_TRACER_DATA +procedure READ_T_TRACER_DATA +generic :: write(unformatted) => WRITE_T_TRACER_DATA +generic :: read(unformatted) => READ_T_TRACER_DATA +END TYPE T_TRACER_DATA + + +TYPE T_TRACER_WORK +!auxuary arrays to work with tracers: +real(kind=WP), allocatable :: del_ttf(:,:) +real(kind=WP), allocatable :: del_ttf_advhoriz(:,:), del_ttf_advvert(:,:) +!_______________________________________________________________________________ +! in case ldiag_DVD=.true. --> calculate discrete variance decay (DVD) +real(kind=WP), allocatable :: tr_dvd_horiz(:,:,:), tr_dvd_vert(:,:,:) +! The fct part +real(kind=WP),allocatable,dimension(:,:) :: fct_LO ! Low-order solution +real(kind=WP),allocatable,dimension(:,:) :: adv_flux_hor ! Antidif. horiz. contrib. from edges / backup for iterafive fct scheme +real(kind=WP),allocatable,dimension(:,:) :: adv_flux_ver ! Antidif. vert. fluxes from nodes / backup for iterafive fct scheme + +real(kind=WP),allocatable,dimension(:,:) :: fct_ttf_max,fct_ttf_min +real(kind=WP),allocatable,dimension(:,:) :: fct_plus,fct_minus +! MUSCL type reconstruction +integer,allocatable,dimension(:) :: nboundary_lay +integer,allocatable,dimension(:,:) :: edge_up_dn_tri +real(kind=WP),allocatable,dimension(:,:,:) :: edge_up_dn_grad + +contains +#if defined(__PGI) +private +#endif +procedure WRITE_T_TRACER_WORK +procedure READ_T_TRACER_WORK +generic :: write(unformatted) => WRITE_T_TRACER_WORK +generic :: read(unformatted) => READ_T_TRACER_WORK +END TYPE T_TRACER_WORK + +! auxury type for reading namelist.tra +TYPE NML_TRACER_LIST_TYPE + INTEGER :: ID =-1 + CHARACTER(len=4) :: adv_hor ='NONE' + CHARACTER(len=4) :: adv_ver ='NONE' + CHARACTER(len=4) :: adv_lim ='NONE' + REAL(kind=WP) :: adv_ph =1. + REAL(kind=WP) :: adv_pv =1. 
+END TYPE NML_TRACER_LIST_TYPE + +TYPE T_TRACER +! total number of tracers: +integer :: num_tracers=2 +type(t_tracer_data), allocatable :: data(:) +type(t_tracer_work) :: work +! general options for all tracers (can be moved to T_TRACER is needed) +! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). +! we keep these tracer characteristics for each tracer individually (contained in T_TRACER_DATA), although in +! the namelist.tra they are define unique for all tracers. +!logical :: smooth_bh_tra = .false. +!real(kind=WP) :: gamma0_tra = 0.0005 +!real(kind=WP) :: gamma1_tra = 0.0125 +!real(kind=WP) :: gamma2_tra = 0. +!logical :: i_vert_diff = .true. + +contains +#if defined(__PGI) +private +#endif +procedure WRITE_T_TRACER +procedure READ_T_TRACER +generic :: write(unformatted) => WRITE_T_TRACER +generic :: read(unformatted) => READ_T_TRACER +END TYPE T_TRACER + +contains + +! Unformatted writing for T_TRACER_DATA +subroutine WRITE_T_TRACER_DATA(tdata, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER_DATA), intent(in) :: tdata + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call write_bin_array(tdata%values, unit, iostat, iomsg) + call write_bin_array(tdata%valuesAB, unit, iostat, iomsg) + write(unit, iostat=iostat, iomsg=iomsg) tdata%smooth_bh_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma0_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma1_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma2_tra + write(unit, iostat=iostat, iomsg=iomsg) tdata%i_vert_diff + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_hor + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ver + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_lim + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ph + write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_pv + write(unit, iostat=iostat, iomsg=iomsg) tdata%ID +end 
subroutine WRITE_T_TRACER_DATA + +! Unformatted reading for T_TRACER_DATA +! NOTE: field order must mirror WRITE_T_TRACER_DATA exactly (raw unformatted stream); +! values/valuesAB are allocated on demand by read_bin_array when absent. +subroutine READ_T_TRACER_DATA(tdata, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER_DATA), intent(inout) :: tdata + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call read_bin_array(tdata%values, unit, iostat, iomsg) + call read_bin_array(tdata%valuesAB, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) tdata%smooth_bh_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma0_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma1_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma2_tra + read(unit, iostat=iostat, iomsg=iomsg) tdata%i_vert_diff + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_hor + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ver + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_lim + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ph + read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_pv + read(unit, iostat=iostat, iomsg=iomsg) tdata%ID +end subroutine READ_T_TRACER_DATA + +! 
Unformatted writing for T_TRACER_WORK +subroutine WRITE_T_TRACER_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER_WORK), intent(in) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call write_bin_array(twork%del_ttf, unit, iostat, iomsg) + call write_bin_array(twork%del_ttf_advhoriz, unit, iostat, iomsg) + call write_bin_array(twork%del_ttf_advvert, unit, iostat, iomsg) + call write_bin_array(twork%tr_dvd_horiz, unit, iostat, iomsg) + call write_bin_array(twork%tr_dvd_vert, unit, iostat, iomsg) + call write_bin_array(twork%fct_LO, unit, iostat, iomsg) + call write_bin_array(twork%adv_flux_hor, unit, iostat, iomsg) + call write_bin_array(twork%adv_flux_ver, unit, iostat, iomsg) + call write_bin_array(twork%fct_ttf_max, unit, iostat, iomsg) + call write_bin_array(twork%fct_ttf_min, unit, iostat, iomsg) + call write_bin_array(twork%fct_plus, unit, iostat, iomsg) + call write_bin_array(twork%fct_minus, unit, iostat, iomsg) + call write_bin_array(twork%nboundary_lay, unit, iostat, iomsg) + call write_bin_array(twork%edge_up_dn_tri, unit, iostat, iomsg) + call write_bin_array(twork%edge_up_dn_grad, unit, iostat, iomsg) +end subroutine WRITE_T_TRACER_WORK + +! 
Unformatted reading for T_TRACER_WORK +subroutine READ_T_TRACER_WORK(twork, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER_WORK), intent(inout) :: twork + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + call read_bin_array(twork%del_ttf, unit, iostat, iomsg) + call read_bin_array(twork%del_ttf_advhoriz, unit, iostat, iomsg) + call read_bin_array(twork%del_ttf_advvert, unit, iostat, iomsg) + call read_bin_array(twork%tr_dvd_horiz, unit, iostat, iomsg) + call read_bin_array(twork%tr_dvd_vert, unit, iostat, iomsg) + call read_bin_array(twork%fct_LO, unit, iostat, iomsg) + call read_bin_array(twork%adv_flux_hor, unit, iostat, iomsg) + call read_bin_array(twork%adv_flux_ver, unit, iostat, iomsg) + call read_bin_array(twork%fct_ttf_max, unit, iostat, iomsg) + call read_bin_array(twork%fct_ttf_min, unit, iostat, iomsg) + call read_bin_array(twork%fct_plus, unit, iostat, iomsg) + call read_bin_array(twork%fct_minus, unit, iostat, iomsg) + call read_bin_array(twork%nboundary_lay, unit, iostat, iomsg) + call read_bin_array(twork%edge_up_dn_tri, unit, iostat, iomsg) + call read_bin_array(twork%edge_up_dn_grad, unit, iostat, iomsg) +end subroutine READ_T_TRACER_WORK + +! Unformatted writing for T_TRACER +subroutine WRITE_T_TRACER(tracer, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER), intent(in) :: tracer + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + + write(unit, iostat=iostat, iomsg=iomsg) tracer%num_tracers + do i=1, tracer%num_tracers + write(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) + end do + write(unit, iostat=iostat, iomsg=iomsg) tracer%work +! write(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra +! write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra +! write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra +! write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra +! 
write(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff +end subroutine WRITE_T_TRACER + +! Unformatted reading for T_TRACER +subroutine READ_T_TRACER(tracer, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER), intent(inout) :: tracer + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + + read(unit, iostat=iostat, iomsg=iomsg) tracer%num_tracers +! write(*,*) 'number of tracers to read: ', tracer%num_tracers + if (.not. allocated(tracer%data)) allocate(tracer%data(tracer%num_tracers)) + do i=1, tracer%num_tracers + read(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) +! write(*,*) 'tracer info:', tracer%data(i)%ID, TRIM(tracer%data(i)%tra_adv_hor), TRIM(tracer%data(i)%tra_adv_ver), TRIM(tracer%data(i)%tra_adv_lim) + end do + read(unit, iostat=iostat, iomsg=iomsg) tracer%work +! read(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra +! read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra +! read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra +! read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra +! read(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff +end subroutine READ_T_TRACER +end module MOD_TRACER +!========================================================== + diff --git a/src/MOD_WRITE_BINARY_ARRAYS.F90 b/src/MOD_WRITE_BINARY_ARRAYS.F90 new file mode 100644 index 000000000..4f03b5cea --- /dev/null +++ b/src/MOD_WRITE_BINARY_ARRAYS.F90 @@ -0,0 +1,160 @@ +!========================================================== +! +!------------------------------------------------------------------------------------------ +! 
useful interface (write_bin_array) for writing arbitary binary arrays into an opened file +MODULE MOD_WRITE_BINARY_ARRAYS +use o_PARAM +private +public :: write_bin_array, write1d_int_static +INTERFACE write_bin_array + MODULE PROCEDURE write1d_real, write1d_int, write1d_char, write2d_real, write2d_int, write3d_real, write3d_int +END INTERFACE +contains + +subroutine write1d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_real + +subroutine write1d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_int + +subroutine write1d_char(arr, unit, iostat, iomsg) + character, intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_char + +subroutine write1d_int_static(arr, unit, iostat, iomsg) + IMPLICIT NONE + integer, intent(in) :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + s1=size(arr, 1) + write(unit, 
iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine write1d_int_static + +subroutine write2d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) + else + s1=0 + s2=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + end if +end subroutine write2d_real + +subroutine write2d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) + else + s1=0 + s2=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + end if +end subroutine write2d_int + + +subroutine write3d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + s3=size(arr, 3) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) + else + s1=0 + s2=0 + s3=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + end if +end subroutine write3d_real + +subroutine write3d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + s3=size(arr, 3) + 
write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) + else + s1=0 + s2=0 + s3=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + end if +end subroutine write3d_int +end module MOD_WRITE_BINARY_ARRAYS +!========================================================== + diff --git a/src/Makefile b/src/Makefile index 7d93ff9c2..598dc8d39 100755 --- a/src/Makefile +++ b/src/Makefile @@ -19,20 +19,26 @@ LIB_PARMS = -L$(PARMS_DIR)/lib -lparms CPP_SOL = -DPARMS ###### Objects for Mesh Partitioning ################################################ -# modules -MOD_INI = fort_part.o oce_modules.o MOD_MESH.o gen_modules_config.o gen_modules_partitioning.o gen_modules_rotate_grid.o - -OBJ_INI = fvom_init.o \ - oce_local.o \ - gen_comm.o +OBJ_INI = fort_part.o oce_modules.o MOD_READ_BINARY_ARRAYS.o MOD_WRITE_BINARY_ARRAYS.o MOD_MESH.o MOD_PARTIT.o \ + gen_modules_partitioning.o gen_modules_config.o \ + gen_modules_rotate_grid.o oce_local.o gen_comm.o fvom_init.o # objects -MODULES = oce_modules.o \ +OBJECTS = fortran_utils.o \ + oce_modules.o \ + info_module.o \ + command_line_options.o \ + MOD_READ_BINARY_ARRAYS.o \ + MOD_WRITE_BINARY_ARRAYS.o \ MOD_MESH.o \ + MOD_PARTIT.o \ + MOD_DYN.o \ + gen_modules_partitioning.o \ + MOD_ICE.o \ + MOD_TRACER.o \ ice_modules.o \ gen_modules_config.o \ - gen_modules_partitioning.o \ gen_modules_clock.o \ gen_modules_rotate_grid.o \ gen_modules_read_NetCDF.o \ @@ -71,45 +77,51 @@ MODULES = oce_modules.o \ io_netcdf_workaround_module.o \ io_data_strategy.o \ fesom_version_info.o \ + io_netcdf_attribute_module.o \ + io_netcdf_file_module.o \ + io_scatter.o \ io_meandata.o \ + io_restart_derivedtype.o \ + io_fesom_file.o \ + io_restart_file_group.o \ io_restart.o \ io_blowup.o \ io_mesh_info.o \ + oce_ale_pressure_bv.o \ gen_ic3d.o \ gen_surface_forcing.o \ gen_modules_gpot.o \ - toy_channel_soufflet.o - -OBJECTS= fvom_main.o \ - gen_comm.o \ + toy_channel_soufflet.o \ + 
gen_modules_backscatter.o \ + solver.o \ + oce_ale_vel_rhs.o \ + write_step_info.o \ + oce_fer_gm.o \ + oce_ale_tracer.o \ + oce_ale.o \ oce_setup_step.o \ + gen_comm.o \ oce_mesh.o \ oce_dyn.o \ - oce_ale_vel_rhs.o \ - oce_vel_rhs_vinv.o \ - oce_ale_pressure_bv.o \ - oce_fer_gm.o \ oce_muscl_adv.o \ - oce_ice_init_state.o \ oce_shortwave_pene.o \ - oce_ale.o \ - oce_ale_tracer.o \ cavity_param.o \ ice_EVP.o \ ice_maEVP.o \ - ice_setup_step.o \ ice_fct.o \ - ice_oce_coupling.o \ ice_thermo_oce.o \ + ice_setup_step.o \ + ice_oce_coupling.o \ gen_model_setup.o \ gen_forcing_init.o \ gen_bulk_formulae.o \ gen_forcing_couple.o \ gen_interpolation.o \ gen_events.o \ - write_step_info.o \ oce_mo_conv.o \ - oce_spp.o + oce_spp.o \ + fesom_module.o \ + fesom_main.o # oce_pressure_bv.o \ @@ -125,14 +137,14 @@ default: run run: $(MODULES) $(OBJECTS) @echo "======= Building FESOM ==========" - $(LD) $(OPT) -o $(EXE) $(MODULES) $(OBJECTS) \ - $(MPI_LIB) $(LIB_LAP) $(LIB_PARMS) $(NC_LIB) + $(LD) $(OPT) -o $(EXE) $(OBJECTS) \ + $(MPI_LIB) $(LIB_LAP) $(NC_LIB) $(LIB_PARMS) # cp -pf $(EXE) ../bin/. run_ini: CPP_DEFS+=-DFVOM_INIT -run_ini: cleanomod $(MOD_INI) $(OBJ_INI) +run_ini: cleanomod $(OBJ_INI) @echo "======= Building FESOM partioning program ==========" - $(LD) $(OPT) -o $(EXE_INI) $(MOD_INI) $(OBJ_INI) \ + $(LD) $(OPT) -o $(EXE_INI) $(OBJ_INI) \ $(MPI_LIB) $(LIB_METIS) $(NC_LIB) cp -pf $(EXE_INI) ../bin/. 
diff --git a/src/associate_mesh_ass.h b/src/associate_mesh_ass.h new file mode 100644 index 000000000..882fc053e --- /dev/null +++ b/src/associate_mesh_ass.h @@ -0,0 +1,69 @@ +nod2D => mesh%nod2D +elem2D => mesh%elem2D +edge2D => mesh%edge2D +edge2D_in => mesh%edge2D_in +ocean_area => mesh%ocean_area +nl => mesh%nl +nn_size => mesh%nn_size +ocean_areawithcav => mesh%ocean_areawithcav +coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D(:,:) +geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D(:,:) +elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes(:,:) +edges(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edges(:,:) +edge_tri(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_tri(:,:) +elem_edges(1:3,1:myDim_elem2D) => mesh%elem_edges(:,:) +elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_area(:) +edge_dxdy(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_dxdy(:,:) +edge_cross_dxdy(1:4,1:myDim_edge2D+eDim_edge2D) => mesh%edge_cross_dxdy(:,:) +elem_cos(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_cos(:) +metric_factor(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%metric_factor(:) +elem_neighbors(1:3,1:myDim_elem2D) => mesh%elem_neighbors(:,:) +nod_in_elem2D => mesh%nod_in_elem2D ! (maxval(rmax),myDim_nod2D+eDim_nod2D) +x_corners => mesh%x_corners ! (myDim_nod2D, maxval(rmax)) +y_corners => mesh%y_corners ! (myDim_nod2D, maxval(rmax)) +nod_in_elem2D_num(1:myDim_nod2D+eDim_nod2D) => mesh%nod_in_elem2D_num(:) +depth(1:myDim_nod2D+eDim_nod2D) => mesh%depth(:) +gradient_vec(1:6,1:myDim_elem2D) => mesh%gradient_vec(:,:) +gradient_sca(1:6,1:myDim_elem2D) => mesh%gradient_sca(:,:) +bc_index_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%bc_index_nod2D(:) +zbar(1:mesh%nl) => mesh%zbar(:) +Z(1:mesh%nl-1) => mesh%Z(:) +elem_depth => mesh%elem_depth ! 
never used, not even allocated +nlevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%nlevels(:) +nlevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D(:) +nlevels_nod2D_min(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D_min(:) +area(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area(:,:) +areasvol(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol(:,:) +area_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area_inv(:,:) +areasvol_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol_inv(:,:) +mesh_resolution(1:myDim_nod2d+eDim_nod2D) => mesh%mesh_resolution(:) +ssh_stiff => mesh%ssh_stiff +lump2d_north(1:myDim_nod2d) => mesh%lump2d_north(:) +lump2d_south(1:myDim_nod2d) => mesh%lump2d_south(:) +cavity_flag_n(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_flag_n(:) +cavity_flag_e(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%cavity_flag_e(:) +!!$cavity_lev_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_lev_nod2D +!!$cavity_lev_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%cavity_lev_elem2D +cavity_depth(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_depth(:) +ulevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%ulevels(:) +ulevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D(:) +ulevels_nod2D_max(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D_max(:) +nn_num(1:myDim_nod2D) => mesh%nn_num(:) +nn_pos(1:mesh%nn_size, 1:myDim_nod2D) => mesh%nn_pos(:,:) +hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode(:,:) +hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new(:,:) +zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n(:,:) +Z_3d_n(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%Z_3d_n(:,:) +helem(1:mesh%nl-1, 1:myDim_elem2D) => mesh%helem(:,:) +bottom_elem_thickness(1:myDim_elem2D) => mesh%bottom_elem_thickness(:) +bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_thickness(:) +dhe(1:myDim_elem2D) => mesh%dhe(:) +hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar(:) 
+hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old(:) +!zbar_n(1:mesh%nl) => mesh%zbar_n +!Z_n(1:mesh%nl-1) => mesh%Z_n +zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot(:) +zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot(:) +zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf(:) +zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf(:) + diff --git a/src/associate_mesh_def.h b/src/associate_mesh_def.h new file mode 100644 index 000000000..1410938ad --- /dev/null +++ b/src/associate_mesh_def.h @@ -0,0 +1,52 @@ +integer , pointer :: nod2D +integer , pointer :: elem2D +integer , pointer :: edge2D +integer , pointer :: edge2D_in +real(kind=WP) , pointer :: ocean_area +real(kind=WP) , pointer :: ocean_areawithcav +integer , pointer :: nl +integer , pointer :: nn_size +real(kind=WP), dimension(:,:), pointer :: coord_nod2D, geo_coord_nod2D +integer, dimension(:,:) , pointer :: elem2D_nodes +integer, dimension(:,:) , pointer :: edges +integer, dimension(:,:) , pointer :: edge_tri +integer, dimension(:,:) , pointer :: elem_edges +real(kind=WP), dimension(:) , pointer :: elem_area +real(kind=WP), dimension(:,:), pointer :: edge_dxdy, edge_cross_dxdy +real(kind=WP), dimension(:) , pointer :: elem_cos, metric_factor +integer, dimension(:,:), pointer :: elem_neighbors +integer, dimension(:,:), pointer :: nod_in_elem2D +real(kind=WP), dimension(:,:), pointer :: x_corners, y_corners +integer, dimension(:) , pointer :: nod_in_elem2D_num +real(kind=WP), dimension(:) , pointer :: depth +real(kind=WP), dimension(:,:), pointer :: gradient_vec +real(kind=WP), dimension(:,:), pointer :: gradient_sca +integer, dimension(:) , pointer :: bc_index_nod2D +real(kind=WP), dimension(:) , pointer :: zbar, Z, elem_depth +integer, dimension(:) , pointer :: nlevels, nlevels_nod2D, nlevels_nod2D_min +real(kind=WP), dimension(:,:), pointer :: area, area_inv, areasvol, areasvol_inv +real(kind=WP), dimension(:) , pointer :: mesh_resolution +real(kind=WP), dimension(:) , 
pointer :: lump2d_north, lump2d_south +type(sparse_matrix) , pointer :: ssh_stiff +integer, dimension(:) , pointer :: cavity_flag_n, cavity_flag_e +real(kind=WP), dimension(:) , pointer :: cavity_depth +integer, dimension(:) , pointer :: ulevels, ulevels_nod2D, ulevels_nod2D_max +integer, dimension(:) , pointer :: nn_num +integer, dimension(:,:), pointer :: nn_pos + +real(kind=WP), dimension(:,:), pointer :: hnode +real(kind=WP), dimension(:,:), pointer :: hnode_new +real(kind=WP), dimension(:,:), pointer :: zbar_3d_n +real(kind=WP), dimension(:,:), pointer :: Z_3d_n +real(kind=WP), dimension(:,:), pointer :: helem +real(kind=WP), dimension(:) , pointer :: bottom_elem_thickness +real(kind=WP), dimension(:) , pointer :: bottom_node_thickness +real(kind=WP), dimension(:) , pointer :: dhe +real(kind=WP), dimension(:) , pointer :: hbar +real(kind=WP), dimension(:) , pointer :: hbar_old +!real(kind=WP), dimension(:) , pointer :: zbar_n +!real(kind=WP), dimension(:) , pointer :: Z_n +real(kind=WP), dimension(:) , pointer :: zbar_n_bot +real(kind=WP), dimension(:) , pointer :: zbar_e_bot +real(kind=WP), dimension(:) , pointer :: zbar_n_srf +real(kind=WP), dimension(:) , pointer :: zbar_e_srf diff --git a/src/associate_part_ass.h b/src/associate_part_ass.h new file mode 100644 index 000000000..af53de8d2 --- /dev/null +++ b/src/associate_part_ass.h @@ -0,0 +1,62 @@ +MPI_COMM_FESOM => partit%MPI_COMM_FESOM +com_nod2D => partit%com_nod2D +com_elem2D => partit%com_elem2D +com_elem2D_full => partit%com_elem2D_full +myDim_nod2D => partit%myDim_nod2D +eDim_nod2D => partit%eDim_nod2D +myDim_elem2D => partit%myDim_elem2D +eDim_elem2D => partit%eDim_elem2D +eXDim_elem2D => partit%eXDim_elem2D +myDim_edge2D => partit%myDim_edge2D +eDim_edge2D => partit%eDim_edge2D +pe_status => partit%pe_status +elem_full_flag => partit%elem_full_flag +MPIERR => partit%MPIERR +npes => partit%npes +mype => partit%mype +maxPEnum => partit%maxPEnum +part => partit%part + 
+lb=lbound(partit%s_mpitype_elem3D, 2) +ub=ubound(partit%s_mpitype_elem3D, 2) + +myList_nod2D (1:myDim_nod2D +eDim_nod2D) => partit%myList_nod2D(:) +myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => partit%myList_elem2D(:) +myList_edge2D(1:myDim_edge2D+eDim_edge2D) => partit%myList_edge2D(:) + +if (allocated(partit%remPtr_nod2D)) then + remPtr_nod2D (1:npes) => partit%remPtr_nod2D(:) + remList_nod2D (1:remPtr_nod2D(npes)) => partit%remList_nod2D(:) +end if + +if (allocated(partit%remPtr_elem2D)) then +remPtr_elem2D (1:npes) => partit%remPtr_elem2D(:) +remList_elem2D(1:remPtr_elem2D(npes)) => partit%remList_elem2D(:) +end if + +s_mpitype_elem2D(1:com_elem2D%sPEnum, 1:4) => partit%s_mpitype_elem2D(:,:) +r_mpitype_elem2D(1:com_elem2D%rPEnum, 1:4) => partit%r_mpitype_elem2D(:,:) + +s_mpitype_elem2D_full_i(1:com_elem2D_full%sPEnum) => partit%s_mpitype_elem2D_full_i(:) +r_mpitype_elem2D_full_i(1:com_elem2D_full%rPEnum) => partit%r_mpitype_elem2D_full_i(:) + +s_mpitype_elem2D_full(1:com_elem2D_full%sPEnum, 1:4) => partit%s_mpitype_elem2D_full(:,:) +r_mpitype_elem2D_full(1:com_elem2D_full%rPEnum, 1:4) => partit%r_mpitype_elem2D_full(:,:) + +s_mpitype_elem3D(1:com_elem2D%sPEnum, lb:ub, 1:4) => partit%s_mpitype_elem3D(:,:,:) +r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D(:,:,:) + +s_mpitype_elem3D_full(1:com_elem2D_full%sPEnum, lb:ub, 1:4) => partit%s_mpitype_elem3D_full(:,:,:) +r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D_full(:,:,:) + +r_mpitype_elem3D(1:com_elem2D%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D(:,:,:) +r_mpitype_elem3D_full(1:com_elem2D_full%rPEnum, lb:ub, 1:4) => partit%r_mpitype_elem3D_full(:,:,:) + +s_mpitype_nod2D(1:com_nod2D%sPEnum) => partit%s_mpitype_nod2D(:) +r_mpitype_nod2D(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D(:) + +s_mpitype_nod2D_i(1:com_nod2D%sPEnum) => partit%s_mpitype_nod2D_i(:) +r_mpitype_nod2D_i(1:com_nod2D%rPEnum) => partit%r_mpitype_nod2D_i(:) + 
+s_mpitype_nod3D(1:com_nod2D%sPEnum, lb:ub, 1:3) => partit%s_mpitype_nod3D(:,:,:) +r_mpitype_nod3D(1:com_nod2D%rPEnum, lb:ub, 1:3) => partit%r_mpitype_nod3D(:,:,:) diff --git a/src/associate_part_def.h b/src/associate_part_def.h new file mode 100644 index 000000000..42145248e --- /dev/null +++ b/src/associate_part_def.h @@ -0,0 +1,39 @@ + + integer, pointer :: MPI_COMM_FESOM ! FESOM communicator (for ocean only runs if often a copy of MPI_COMM_WORLD) + type(com_struct), pointer :: com_nod2D + type(com_struct), pointer :: com_elem2D + type(com_struct), pointer :: com_elem2D_full + integer :: ub, lb ! to work with r(s)_mpitype_elem3D(nod3D) + + integer, dimension(:), pointer :: s_mpitype_edge2D, r_mpitype_edge2D + integer, dimension(:,:), pointer :: s_mpitype_elem2D, r_mpitype_elem2D + integer, dimension(:), pointer :: s_mpitype_elem2D_full_i, r_mpitype_elem2D_full_i + integer, dimension(:,:), pointer :: s_mpitype_elem2D_full, r_mpitype_elem2D_full + integer, dimension(:,:,:), pointer :: s_mpitype_elem3D, r_mpitype_elem3D + integer, dimension(:,:,:), pointer :: s_mpitype_elem3D_full, r_mpitype_elem3D_full + + integer, dimension(:), pointer :: s_mpitype_nod2D, r_mpitype_nod2D + integer, dimension(:), pointer :: s_mpitype_nod2D_i, r_mpitype_nod2D_i + integer, dimension(:,:,:), pointer :: s_mpitype_nod3D, r_mpitype_nod3D + + integer, pointer :: MPIERR + integer, pointer :: npes + integer, pointer :: mype + integer, pointer :: maxPEnum + + integer, dimension(:), pointer :: part + + ! 
Mesh partition + integer, pointer :: myDim_nod2D, eDim_nod2D + integer, dimension(:), pointer :: myList_nod2D + integer, pointer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D + integer, dimension(:), pointer :: myList_elem2D + integer, pointer :: myDim_edge2D, eDim_edge2D + integer, dimension(:), pointer :: myList_edge2D + + integer, pointer :: pe_status + + integer, dimension(:), pointer :: remPtr_nod2D(:), remList_nod2D(:) + integer, dimension(:), pointer :: remPtr_elem2D(:), remList_elem2D(:) + + logical, pointer :: elem_full_flag diff --git a/src/async_threads_cpp/CMakeLists.txt b/src/async_threads_cpp/CMakeLists.txt index 1af6c8fff..d72d7ce7d 100644 --- a/src/async_threads_cpp/CMakeLists.txt +++ b/src/async_threads_cpp/CMakeLists.txt @@ -8,7 +8,11 @@ file(GLOB sources_CXX ${CMAKE_CURRENT_LIST_DIR}/*.cpp) include(FortranCInterface) FortranCInterface_HEADER(ThreadsManagerFCMacros.h MACRO_NAMESPACE "ThreadsManagerFCMacros_" SYMBOLS init_ccall begin_ccall end_ccall) -add_library(${PROJECT_NAME} ${sources_CXX}) +if(${BUILD_FESOM_AS_LIBRARY}) + add_library(${PROJECT_NAME} ${sources_CXX}) +else() + add_library(${PROJECT_NAME} ${sources_CXX}) +endif() target_include_directories(${PROJECT_NAME} INTERFACE ${CMAKE_CURRENT_LIST_DIR} PUBLIC ${CMAKE_CURRENT_BINARY_DIR} @@ -18,3 +22,7 @@ if(${CMAKE_CXX_COMPILER_ID} STREQUAL Cray ) else() target_compile_options(${PROJECT_NAME} PRIVATE -std=c++11) endif() +if(${BUILD_FESOM_AS_LIBRARY}) + target_compile_options(${PROJECT_NAME} PRIVATE -fPIC) + install(TARGETS ${PROJECT_NAME} DESTINATION "${FESOM_INSTALL_PREFIX}/lib") +endif() diff --git a/src/cavity_param.F90 b/src/cavity_param.F90 index ba6e2270c..ac5409844 100644 --- a/src/cavity_param.F90 +++ b/src/cavity_param.F90 @@ -1,3 +1,64 @@ +module cavity_interfaces + interface + subroutine cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target 
:: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(in) , target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine cavity_heat_water_fluxes_2eq(ice, tracers, partit, mesh) + USE MOD_ICE + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_tracer), intent(in) , target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine cavity_ice_clean_vel(ice, partit, mesh) + use MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine cavity_ice_clean_ma(ice, partit, mesh) + use MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine cavity_momentum_fluxes(dynamics, partit, mesh) + use MOD_DYN + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_dyn), intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface +end module + + ! ! !_______________________________________________________________________________ @@ -5,20 +66,24 @@ ! that have at least one cavity nodes as nearest neighbour. ! Than compute for all cavity points (ulevels_nod2D>1), which is the closest ! 
cavity line point to that point --> use their coordinates and depth -subroutine compute_nrst_pnt2cavline(mesh) +subroutine compute_nrst_pnt2cavline(partit, mesh) use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM , only: WP - use o_ARRAYS, only: Z_3d_n - use g_PARSUP implicit none - type(t_mesh), intent(inout) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh integer :: node, kk, elnodes(3), gnode, aux_idx integer, allocatable, dimension(:) :: cavl_idx, lcl_cavl_idx real(kind=WP), allocatable, dimension(:) :: cavl_lon, cavl_lat, cavl_dep,lcl_cavl_lon, lcl_cavl_lat, lcl_cavl_dep real(kind=WP) :: aux_x, aux_y, aux_d, aux_dmin -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ if (mype==0) write(*,*) ' --> compute cavity line ' @@ -120,24 +185,29 @@ end subroutine compute_nrst_pnt2cavline ! adjusted for use in FESOM by Ralph Timmermann, 16.02.2011 ! Reviewed by ? ! adapted by P. 
SCholz for FESOM2.0 -subroutine cavity_heat_water_fluxes_3eq(mesh) +subroutine cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_DYN + use MOD_ICE use o_PARAM , only: density_0, WP - use o_ARRAYS, only: heat_flux, water_flux, tr_arr, Z_3d_n, Unode, density_m_rho0,density_ref - use i_ARRAYS, only: net_heat_flux, fresh_wa_flux - use g_PARSUP + use o_ARRAYS, only: heat_flux, water_flux, density_m_rho0, density_ref implicit none - !___________________________________________________________________________ - type(t_mesh), intent(inout) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + type(t_tracer), intent(in) , target :: tracers + type(t_ice) , intent(inout), target :: ice + type(t_dyn), intent(in), target :: dynamics real (kind=WP) :: temp,sal,tin,zice real (kind=WP) :: rhow, rhor, rho real (kind=WP) :: gats1, gats2, gas, gat real (kind=WP) :: ep1,ep2,ep3,ep4,ep5,ep31 real (kind=WP) :: ex1,ex2,ex3,ex4,ex5,ex6 real (kind=WP) :: vt1,sr1,sr2,sf1,sf2,tf1,tf2,tf,sf,seta,re - integer :: node, nzmax, nzmin - + integer :: node, nzmax, nzmin !___________________________________________________________________________ real(kind=WP),parameter :: rp = 0. !reference pressure real(kind=WP),parameter :: a = -0.0575 !Foldvik&Kvinge (1974) @@ -168,17 +238,25 @@ subroutine cavity_heat_water_fluxes_3eq(mesh) ! hemw= 4.02*14. ! oomw= -30. ! oofw= -2.5 + real(kind=WP), dimension(:,:,:), pointer :: UVnode + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux +! 
real(kind=WP), dimension(:) , pointer :: net_heat_flux +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) -#include "associate_mesh.h" - !___________________________________________________________________________ do node=1,myDim_nod2D !+eDim_nod2D nzmin = ulevels_nod2D(node) if(nzmin==1) cycle ! if no cavity skip that node !_______________________________________________________________________ - temp = tr_arr(nzmin, node,1) - sal = tr_arr(nzmin, node,2) + temp = tracers%data(1)%values(nzmin,node) + sal = tracers%data(2)%values(nzmin,node) zice = Z_3d_n(nzmin, node) !(<0) !_______________________________________________________________________ @@ -194,7 +272,7 @@ subroutine cavity_heat_water_fluxes_3eq(mesh) ! if(vt1.eq.0.) vt1=0.001 !rt re = Hz_r(i,j,N)*ds/un !Reynolds number - vt1 = sqrt(Unode(1,nzmin,node)*Unode(1,nzmin,node)+Unode(2,nzmin,node)*Unode(2,nzmin,node)) + vt1 = sqrt(UVnode(1,nzmin,node)*UVnode(1,nzmin,node)+UVnode(2,nzmin,node)*UVnode(2,nzmin,node)) vt1 = max(vt1,0.001_WP) !vt1 = max(vt1,0.005) ! CW re = 10._WP/un !vt1*re (=velocity times length scale over kinematic viscosity) is the Reynolds number @@ -305,22 +383,33 @@ end subroutine cavity_heat_water_fluxes_3eq ! Compute the heat and freshwater fluxes under ice cavity using simple 2equ. ! Coded by Adriana Huerta-Casas ! 
Reviewed by Qiang Wang -subroutine cavity_heat_water_fluxes_2eq(mesh) +subroutine cavity_heat_water_fluxes_2eq(ice, tracers, partit, mesh) use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_ICE use o_PARAM , only: WP - use o_ARRAYS, only: heat_flux, water_flux, tr_arr, Z_3d_n - use i_ARRAYS, only: net_heat_flux, fresh_wa_flux - use g_PARSUP + use o_ARRAYS, only: heat_flux, water_flux implicit none - type(t_mesh), intent(inout) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + type(t_tracer), intent(in) , target :: tracers + type(t_ice) , intent(inout), target :: ice integer :: node, nzmin real(kind=WP) :: gama, L, aux real(kind=WP) :: c2, c3, c4, c5, c6 real(kind=WP) :: t_i, s_i, p, t_fz + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux +! real(kind=WP), dimension(:) , pointer :: net_heat_flux +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) -#include "associate_mesh.h" - !___________________________________________________________________________ ! parameter for computing heat and water fluxes gama = 1.0e-4_WP ! heat exchange velocity [m/s] @@ -336,8 +425,8 @@ subroutine cavity_heat_water_fluxes_2eq(mesh) do node=1,myDim_nod2D nzmin = ulevels_nod2D(node) if(nzmin==1) cycle - t_i = tr_arr(nzmin,node,1) - s_i = tr_arr(nzmin,node,2) + t_i = tracers%data(1)%values(nzmin,node) + s_i = tracers%data(2)%values(nzmin,node) t_fz = c3*(s_i**(3./2.)) + c4*(s_i**2) + c5*s_i + c6*abs(Z_3d_n(nzmin,node)) heat_flux(node)=vcpw*gama*(t_i - t_fz) ! Hunter2006 used cpw=3974J/Kg (*rhowat) @@ -353,20 +442,29 @@ end subroutine cavity_heat_water_fluxes_2eq !_______________________________________________________________________________ ! Compute the momentum fluxes under ice cavity ! 
Moved to this separated routine by Qiang, 20.1.2012 -subroutine cavity_momentum_fluxes(mesh) +subroutine cavity_momentum_fluxes(dynamics, partit, mesh) use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN use o_PARAM , only: density_0, C_d, WP - use o_ARRAYS, only: UV, stress_surf - use i_ARRAYS, only: u_w, v_w - use g_PARSUP + use o_ARRAYS, only: stress_surf, stress_node_surf implicit none !___________________________________________________________________________ - type(t_mesh), intent(inout) , target :: mesh - integer :: elem, elnodes(3), nzmin + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + integer :: elem, elnodes(3), nzmin, node real(kind=WP) :: aux - -#include "associate_mesh.h" + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) + UVnode=>dynamics%uvnode(:,:,:) !___________________________________________________________________________ do elem=1,myDim_elem2D @@ -381,20 +479,45 @@ subroutine cavity_momentum_fluxes(mesh) stress_surf(1,elem)=-aux*UV(1,nzmin,elem) stress_surf(2,elem)=-aux*UV(2,nzmin,elem) end do + + !___________________________________________________________________________ + do node=1,myDim_nod2D+eDim_nod2D + nzmin = ulevels_nod2d(node) + if(nzmin==1) cycle + + ! momentum stress: + ! need to check the sensitivity to the drag coefficient + ! here I use the bottom stress coefficient, which is 3e-3, for this FO2 work. + aux=sqrt(UVnode(1,nzmin,node)**2+UVnode(2,nzmin,node)**2)*density_0*C_d + stress_node_surf(1,node)=-aux*UVnode(1,nzmin,node) + stress_node_surf(2,node)=-aux*UVnode(2,nzmin,node) + end do end subroutine cavity_momentum_fluxes ! ! 
!_______________________________________________________________________________ -subroutine cavity_ice_clean_vel(mesh) - use MOD_MESH - use i_ARRAYS, only: U_ice, V_ice - use g_PARSUP +subroutine cavity_ice_clean_vel(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH implicit none - type(t_mesh), intent(inout) , target :: mesh - integer :: node + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: node + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) -#include "associate_mesh.h" - + !___________________________________________________________________________ do node=1,myDim_nod2d+eDim_nod2d if(ulevels_nod2D(node)>1) then U_ice(node)=0._WP @@ -405,16 +528,29 @@ end subroutine cavity_ice_clean_vel ! ! !_______________________________________________________________________________ -subroutine cavity_ice_clean_ma(mesh) - use MOD_MESH - use i_ARRAYS, only: m_ice, m_snow, a_ice - use g_PARSUP +subroutine cavity_ice_clean_ma(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH implicit none - type(t_mesh), intent(inout) , target :: mesh - integer :: node + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: node + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) -#include "associate_mesh.h" - + !___________________________________________________________________________ do node=1,myDim_nod2d+eDim_nod2d if(ulevels_nod2D(node)>1) then m_ice(node) =0.0_WP @@ -445,11 +581,12 @@ end subroutine dist_on_earth ! [oC] (TIN) bezogen auf den in-situ Druck[dbar] (PRES) mit Hilfe ! eines Iterationsverfahrens aus. subroutine potit(salz,pt,pres,rfpres,tin) + use o_PARAM , only: WP integer iter - real salz,pt,pres,rfpres,tin - real epsi,tpmd,pt1,ptd,pttmpr + real(kind=WP) :: salz,pt,pres,rfpres,tin + real(kind=WP) :: epsi, pt1,ptd,pttmpr - data tpmd / 0.001 / + real(kind=WP), parameter :: tpmd=0.001_WP epsi = 0. do iter=1,100 @@ -476,15 +613,19 @@ end subroutine potit ! TEMP = 40.0 DegC ! PRES = 10000.000 dbar ! RFPRES = 0.000 dbar -real function pttmpr(salz,temp,pres,rfpres) - - data ct2 ,ct3 /0.29289322 , 1.707106781/ - data cq2a,cq2b /0.58578644 , 0.121320344/ - data cq3a,cq3b /3.414213562, -4.121320344/ +real(kind=WP) function pttmpr(salz,temp,pres,rfpres) + use o_PARAM , only: WP - real salz,temp,pres,rfpres - real p,t,dp,dt,q,ct2,ct3,cq2a,cq2b,cq3a,cq3b - real adlprt + real(kind=WP) :: salz,temp,pres,rfpres + real(kind=WP) :: p,t,dp,dt,q + real(kind=WP) :: adlprt + real(kind=WP), parameter :: ct2 = 0.29289322_WP + real(kind=WP), parameter :: ct3 = 1.707106781_WP + real(kind=WP), parameter :: cq2a = 0.58578644_WP + real(kind=WP), parameter :: cq2b = 0.121320344_WP + real(kind=WP), parameter :: cq3a = 3.414213562_WP + real(kind=WP), parameter :: cq3b = -4.121320344_WP + p = pres t = temp @@ -515,17 +656,26 @@ end function pttmpr ! fuer SALZ = 40.0 psu ! TEMP = 40.0 DegC ! 
PRES = 10000.000 dbar -real function adlprt(salz,temp,pres) +real(kind=WP) function adlprt(salz,temp,pres) - real salz,temp,pres - real s0,a0,a1,a2,a3,b0,b1,c0,c1,c2,c3,d0,d1,e0,e1,e2,ds - - data s0 /35.0/ - data a0,a1,a2,a3 /3.5803E-5, 8.5258E-6, -6.8360E-8, 6.6228E-10/ - data b0,b1 /1.8932E-6, -4.2393E-8/ - data c0,c1,c2,c3 /1.8741E-8, -6.7795E-10, 8.7330E-12, -5.4481E-14/ - data d0,d1 /-1.1351E-10, 2.7759E-12/ - data e0,e1,e2 /-4.6206E-13, 1.8676E-14, -2.1687E-16/ + use o_PARAM , only: WP + real(kind=WP) :: salz,temp,pres, ds + real(kind=WP), parameter :: s0 = 35.0 + real(kind=WP), parameter :: a0 = 3.5803E-5 + real(kind=WP), parameter :: a1 = 8.5258E-6 + real(kind=WP), parameter :: a2 = -6.8360E-8 + real(kind=WP), parameter :: a3 = 6.6228E-10 + real(kind=WP), parameter :: b0 = 1.8932E-6 + real(kind=WP), parameter :: b1 = -4.2393E-8 + real(kind=WP), parameter :: c0 = 1.8741E-8 + real(kind=WP), parameter :: c1 = -6.7795E-10 + real(kind=WP), parameter :: c2 = 8.7330E-12 + real(kind=WP), parameter :: c3 = -5.4481E-14 + real(kind=WP), parameter :: d0 = -1.1351E-10 + real(kind=WP), parameter :: d1 = 2.7759E-12 + real(kind=WP), parameter :: e0 = -4.6206E-13 + real(kind=WP), parameter :: e1 = 1.8676E-14 + real(kind=WP), parameter :: e2 = -2.1687E-16 ds = salz-s0 adlprt = ( ( (e2*temp + e1)*temp + e0 )*pres & diff --git a/src/command_line_options.F90 b/src/command_line_options.F90 new file mode 100644 index 000000000..7b66ddae1 --- /dev/null +++ b/src/command_line_options.F90 @@ -0,0 +1,47 @@ +module command_line_options_module +! 
synopsis: read options passed to the main executable and trigger corresponding actions + + implicit none + public command_line_options + private + + type :: command_line_options_type + contains + procedure, nopass :: parse + end type + type(command_line_options_type) command_line_options + +contains + + subroutine parse() + use info_module + integer i + character(len=:), allocatable :: arg + integer arglength + + do i = 1, command_argument_count() + call get_command_argument(i, length=arglength) + allocate(character(arglength) :: arg) + call get_command_argument(i, value=arg) + select case (arg) + case('--smoketest') + print '(g0)', 'smoketest' + case('--info') + print '(g0)', '# Definitions' + call info%print_definitions() + print '(g0)', '# compiled with OpenMP?' +#ifdef _OPENMP + print '(g0)', '_OPENMP is ON' +#else + print '(g0)', '_OPENMP is OFF' +#endif + case default + print *, 'unknown option: ', arg + error stop + end select + deallocate(arg) + end do + + end subroutine + +end module diff --git a/src/cpl_driver.F90 b/src/cpl_driver.F90 index c87a83522..b3e4ef749 100755 --- a/src/cpl_driver.F90 +++ b/src/cpl_driver.F90 @@ -15,7 +15,7 @@ module cpl_driver use mod_oasis ! oasis module use g_config, only : dt use o_param, only : rad - use g_PARSUP + USE MOD_PARTIT implicit none save ! @@ -23,7 +23,7 @@ module cpl_driver ! #if defined (__oifs) - integer, parameter :: nsend = 5 + integer, parameter :: nsend = 7 integer, parameter :: nrecv = 13 #else integer, parameter :: nsend = 4 @@ -92,8 +92,9 @@ module cpl_driver contains - subroutine cpl_oasis3mct_init( localCommunicator ) - implicit none + subroutine cpl_oasis3mct_init(partit, localCommunicator ) + USE MOD_PARTIT + implicit none save !------------------------------------------------------------------- @@ -104,12 +105,13 @@ subroutine cpl_oasis3mct_init( localCommunicator ) ! Arguments ! integer, intent(OUT) :: localCommunicator + type(t_partit), intent(inout), target :: partit ! ! Local declarations ! 
!-------------------------------------------------------------------- ! - + #ifdef VERBOSE print *, '=================================================' print *, 'cpl_oasis3mct_init : coupler initialization for OASIS3-MCT' @@ -142,12 +144,12 @@ subroutine cpl_oasis3mct_init( localCommunicator ) ENDIF ! Get MPI size and rank - CALL MPI_Comm_Size ( localCommunicator, npes, ierror ) + CALL MPI_Comm_Size ( localCommunicator, partit%npes, ierror ) IF (ierror /= 0) THEN CALL oasis_abort(comp_id, 'cpl_oasis3mct_init', 'comm_size failed.') ENDIF - CALL MPI_Comm_Rank ( localCommunicator, mype, ierror ) + CALL MPI_Comm_Rank ( localCommunicator, partit%mype, ierror ) IF (ierror /= 0) THEN CALL oasis_abort(comp_id, 'cpl_oasis3mct_init', 'comm_rank failed.') ENDIF @@ -158,7 +160,7 @@ end subroutine cpl_oasis3mct_init ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - subroutine cpl_oasis3mct_define_unstr(mesh) + subroutine cpl_oasis3mct_define_unstr(partit, mesh) #ifdef __oifs use mod_oasis_auxiliary_routines, ONLY: oasis_get_debug, oasis_set_debug @@ -166,11 +168,14 @@ subroutine cpl_oasis3mct_define_unstr(mesh) use mod_oasis_method, ONLY: oasis_get_debug, oasis_set_debug #endif use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP use g_rotate_grid use mod_oasis, only: oasis_write_area, oasis_write_mask implicit none save - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit !------------------------------------------------------------------- ! Definition of grid and field information for ocean ! exchange between FESOM, ECHAM6 and OASIS3-MCT. 
@@ -208,8 +213,8 @@ subroutine cpl_oasis3mct_define_unstr(mesh) integer :: my_number_of_points integer :: number_of_all_points - integer :: counts_from_all_pes(npes) - integer :: displs_from_all_pes(npes) + integer :: counts_from_all_pes(partit%npes) + integer :: displs_from_all_pes(partit%npes) integer :: my_displacement integer,allocatable :: unstr_mask(:,:) @@ -225,7 +230,11 @@ subroutine cpl_oasis3mct_define_unstr(mesh) real(kind=WP), allocatable :: all_y_coords(:, :) ! latitude coordinates real(kind=WP), allocatable :: all_area(:,:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + #ifdef VERBOSE print *, '==============================================================' @@ -384,6 +393,8 @@ subroutine cpl_oasis3mct_define_unstr(mesh) cpl_send( 3)='snt_feom' ! 3. snow thickness [m] -> cpl_send( 4)='ist_feom' ! 4. sea ice surface temperature [K] -> cpl_send( 5)='sia_feom' ! 5. sea ice albedo [%-100] -> + cpl_send( 6)='u_feom' ! 6. eastward surface velocity [m/s] -> + cpl_send( 7)='v_feom' ! 7. northward surface velocity [m/s] -> #else cpl_send( 1)='sst_feom' ! 1. sea surface temperature [°C] -> cpl_send( 2)='sit_feom' ! 2. sea ice thickness [m] -> @@ -479,10 +490,9 @@ subroutine cpl_oasis3mct_define_unstr(mesh) call oasis_enddef(ierror) if (commRank) print *, 'fesom oasis_enddef: COMPLETED' - #ifndef __oifs if (commRank) print *, 'FESOM: calling exchange_roots' - call exchange_roots(source_root, target_root, 1, MPI_COMM_FESOM, MPI_COMM_WORLD) + call exchange_roots(source_root, target_root, 1, partit%MPI_COMM_FESOM, MPI_COMM_WORLD) if (commRank) print *, 'FESOM source/target roots: ', source_root, target_root #endif @@ -504,8 +514,10 @@ end subroutine cpl_oasis3mct_define_unstr ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ! 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - subroutine cpl_oasis3mct_send(ind, data_array, action) + subroutine cpl_oasis3mct_send(ind, data_array, action, partit) use o_param + USE MOD_PARTIT + USE MOD_PARSUP implicit none save !--------------------------------------------------------------------- @@ -523,7 +535,8 @@ subroutine cpl_oasis3mct_send(ind, data_array, action) ! integer, intent( IN ) :: ind ! variable Id logical, intent( OUT ) :: action ! - real(kind=WP), intent(IN) :: data_array(myDim_nod2D+eDim_nod2D) + type(t_partit), intent(in) :: partit + real(kind=WP), intent(IN) :: data_array(partit%myDim_nod2D+partit%eDim_nod2D) ! ! Local declarations ! @@ -540,11 +553,11 @@ subroutine cpl_oasis3mct_send(ind, data_array, action) cplsnd(ind, :)=cplsnd(ind, :)+data_array ! call do_oce_2_atm(cplsnd(ind, :)/real(o2a_call_count), atm_fld, 1) - exfld = cplsnd(ind, 1:myDim_nod2D)/real(o2a_call_count) + exfld = cplsnd(ind, 1:partit%myDim_nod2D)/real(o2a_call_count) t2=MPI_Wtime() #ifdef VERBOSE - if (mype==0) then + if (partit%mype==0) then print *, 'FESOM oasis_send: ', cpl_send(ind) endif #endif @@ -571,9 +584,11 @@ end subroutine cpl_oasis3mct_send ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ! ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - subroutine cpl_oasis3mct_recv(ind, data_array, action) + subroutine cpl_oasis3mct_recv(ind, data_array, action, partit) use o_param use g_comm_auto + USE MOD_PARTIT + USE MOD_PARSUP implicit none save !--------------------------------------------------------------------- @@ -586,6 +601,7 @@ subroutine cpl_oasis3mct_recv(ind, data_array, action) integer, intent( IN ) :: ind ! variable Id logical, intent( OUT ) :: action ! real(kind=WP), intent( OUT ) :: data_array(:) + type(t_partit), intent(inout), target :: partit ! ! Local declarations ! @@ -615,8 +631,8 @@ subroutine cpl_oasis3mct_recv(ind, data_array, action) ! and delivered back to FESOM. 
action=(info==3 .OR. info==10 .OR. info==11 .OR. info==12 .OR. info==13) if (action) then - data_array(1:myDim_nod2d) = exfld - call exchange_nod(data_array) + data_array(1:partit%myDim_nod2d) = exfld + call exchange_nod(data_array, partit) end if t3=MPI_Wtime() if (ind==1) then @@ -643,14 +659,15 @@ SUBROUTINE exchange_roots(source_root, target_root, il_side, & !global_comm (i.e. comm_psmile here) IMPLICIT NONE - - INTEGER, INTENT(IN) :: il_side - INTEGER, INTENT(IN) :: local_comm, global_comm - INTEGER, INTENT(OUT) :: source_root, target_root + + INTEGER, INTENT(IN) :: il_side + INTEGER, INTENT(IN) :: local_comm, global_comm + INTEGER, INTENT(OUT) :: source_root, target_root INTEGER :: status(MPI_STATUS_SIZE) INTEGER :: local_rank, my_global_rank, ierror + source_root = 500000 target_root = 500000 diff --git a/src/cvmix_idemix.F90 b/src/cvmix_idemix.F90 index 88396c143..b8a215a92 100644 --- a/src/cvmix_idemix.F90 +++ b/src/cvmix_idemix.F90 @@ -476,12 +476,29 @@ subroutine integrate_idemix( & type(idemix_type), pointer ::idemix_constants_in ! initialize diagnostics - iwe_Ttot = 0.0 - iwe_Tdif = 0.0 - iwe_Tdis = 0.0 - iwe_Tsur = 0.0 - iwe_Tbot = 0.0 - + iwe_new = 0.0 + cvmix_int_1 = 0.0 + cvmix_int_2 = 0.0 + cvmix_int_3 = 0.0 + iwe_Ttot = 0.0 + iwe_Tdif = 0.0 + iwe_Tdis = 0.0 + iwe_Tsur = 0.0 + iwe_Tbot = 0.0 + c0 = 0.0 + v0 = 0.0 + alpha_c = 0.0 + a_dif = 0.0 + b_dif = 0.0 + c_dif = 0.0 + a_tri = 0.0 + b_tri = 0.0 + c_tri = 0.0 + d_tri = 0.0 + delta = 0.0 + iwe_max = 0.0 + forc = 0.0 + ! FIXME: nils: Is this necessary? idemix_constants_in => idemix_constants_saved if (present(idemix_userdef_constants)) then diff --git a/src/cvmix_tke.F90 b/src/cvmix_tke.F90 index 1fa16f8db..0925a64de 100644 --- a/src/cvmix_tke.F90 +++ b/src/cvmix_tke.F90 @@ -1,6 +1,6 @@ -module cvmix_tke -!! This module contains the main computations of diffusivities based on +module cvmix_tke +!! This module contains the main computations of diffusivities based on !! 
TKE (following Gaspar'90) with the calculation of the mixing length following (Blanke, B., P. Delecluse) !! !! @see Gaspar, P., Y. Grégoris, and J.-M. Lefevre @@ -13,6 +13,10 @@ module cvmix_tke !! @author Hannah Kleppin, MPIMET/University of Hamburg !! @author Oliver Gutjahr, MPIMET !! +!! @par Copyright +!! 2002-2013 by MPI-M +!! This software is provided for non-commercial use only. +!! See the LICENSE and the WARRANTY conditions. !! use cvmix_kinds_and_types, only : cvmix_r8, & @@ -31,7 +35,6 @@ module cvmix_tke !public member functions - public :: init_tke public :: cvmix_coeffs_tke public :: put_tke @@ -68,6 +71,7 @@ module cvmix_tke c_eps ,& ! dissipation parameter cd ,& ! alpha_tke ,& ! + clc ,& ! factor for Langmuir turbulence mxl_min ,& ! minimum value for mixing length kappaM_min ,& ! minimum value for Kappa momentum kappaM_max ,& ! maximum value for Kappa momentum @@ -81,6 +85,7 @@ module cvmix_tke logical :: & only_tke ,& + l_lc ,& use_ubound_dirichlet ,& use_lbound_dirichlet @@ -94,7 +99,7 @@ module cvmix_tke subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, & tke_mxl_choice, use_ubound_dirichlet, use_lbound_dirichlet, & - handle_old_vals, only_tke, tke_min, tke_surf_min, & + handle_old_vals, only_tke, l_lc, clc, tke_min, tke_surf_min, & tke_userdef_constants) ! This subroutine sets user or default values for TKE parameters @@ -108,6 +113,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, KappaM_min ,& KappaM_max ,& tke_surf_min ,& + clc ,& tke_min integer, intent(in),optional :: & @@ -116,6 +122,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, logical, intent(in), optional :: & only_tke ,& + l_lc ,& use_ubound_dirichlet ,& use_lbound_dirichlet @@ -124,7 +131,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, ! FIXME: not sure about the allowed ranges for TKE parameters if (present(c_k)) then - if(c_k.lt. 0.05d0 .or. 
c_k .gt. 0.3d0) then + if(c_k.lt. 0.0d0 .or. c_k .gt. 1.5d0) then print*, "ERROR:c_k can only be allowed_range" stop 1 end if @@ -134,7 +141,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if if (present(c_eps)) then - if(c_eps.lt. 0.5d0 .or. c_eps .gt. 1.d0) then + if(c_eps.lt. 0.d0 .or. c_eps .gt. 10.d0) then print*, "ERROR:c_eps can only be allowed_range" stop 1 end if @@ -154,13 +161,13 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if if (present(alpha_tke)) then - if(alpha_tke.lt. 1.d0 .or. alpha_tke .gt. 30.d0) then + if(alpha_tke.lt. 1.d0 .or. alpha_tke .gt. 90.d0) then print*, "ERROR:alpha_tke can only be allowed_range" stop 1 end if call put_tke('alpha_tke', alpha_tke, tke_userdef_constants) else - call put_tke('alpha_tke', 30._cvmix_r8, tke_userdef_constants) + call put_tke('alpha_tke', 30.0_cvmix_r8, tke_userdef_constants) end if if (present(mxl_min)) then @@ -170,7 +177,7 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('mxl_min', mxl_min, tke_userdef_constants) else - call put_tke('mxl_min', 1._cvmix_r8-8, tke_userdef_constants) + call put_tke('mxl_min', 1.0e-8_cvmix_r8, tke_userdef_constants) end if if (present(KappaM_min)) then @@ -180,17 +187,17 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, end if call put_tke('kappaM_min', KappaM_min, tke_userdef_constants) else - call put_tke('kappaM_min', 0._cvmix_r8, tke_userdef_constants) + call put_tke('kappaM_min', 0.0_cvmix_r8, tke_userdef_constants) end if if (present(KappaM_max)) then - if(KappaM_max.lt. 1.d0 .or. KappaM_max .gt. 100.d0) then + if(KappaM_max.lt. 10.d0 .or. KappaM_max .gt. 
1000.d0) then print*, "ERROR:kappaM_max can only be allowed_range" stop 1 end if call put_tke('kappaM_max', KappaM_max, tke_userdef_constants) else - call put_tke('kappaM_max', 100._cvmix_r8, tke_userdef_constants) + call put_tke('kappaM_max', 100.0_cvmix_r8, tke_userdef_constants) end if if (present(tke_mxl_choice)) then @@ -213,24 +220,35 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, call put_tke('handle_old_vals', 1, tke_userdef_constants) end if +if (present(clc)) then + if(clc.lt. 0.0 .or. clc .gt. 30.0) then + print*, "ERROR:clc can only be allowed_range" + stop 1 + end if + call put_tke('clc', clc, tke_userdef_constants) +else + call put_tke('clc',0.15_cvmix_r8 , tke_userdef_constants) +end if + + if (present(tke_min)) then - if(tke_min.lt. 1.d-7 .or. tke_min.gt. 1.d-4 ) then - print*, "ERROR:tke_min can only be 10^-7 to 10^-4" + if(tke_min.lt. 1.d-9 .or. tke_min.gt. 1.d-2 ) then + print*, "ERROR:tke_min can only be allowed_range" stop 1 end if call put_tke('tke_min', tke_min, tke_userdef_constants) else - call put_tke('tke_min', 1._cvmix_r8-6, tke_userdef_constants) + call put_tke('tke_min', 1.0e-6_cvmix_r8, tke_userdef_constants) end if if (present(tke_surf_min)) then if(tke_surf_min.lt. 1.d-7 .or. tke_surf_min.gt. 
1.d-2 ) then - print*, "ERROR:tke_surf_min can only be 10^-7 to 10^-4" + print*, "ERROR:tke_surf_min can only be allowed_range" stop 1 end if call put_tke('tke_surf_min', tke_surf_min, tke_userdef_constants) else - call put_tke('tke_surf_min', 1._cvmix_r8-4, tke_userdef_constants) + call put_tke('tke_surf_min', 1.e-4_cvmix_r8, tke_userdef_constants) end if if (present(use_ubound_dirichlet)) then @@ -252,6 +270,13 @@ subroutine init_tke(c_k, c_eps, cd, alpha_tke, mxl_min, KappaM_min, KappaM_max, call put_tke('only_tke', .true., tke_userdef_constants) end if +if (present(l_lc)) then + + call put_tke('l_lc', l_lc, tke_userdef_constants) +else + call put_tke('l_lc', .false., tke_userdef_constants) +end if + end subroutine init_tke !================================================================================= @@ -285,6 +310,7 @@ subroutine tke_wrap(Vmix_vars, Vmix_params, tke_userdef_constants) !tke ,& tke_Lmix ,& tke_Pr ,& + tke_plc ,& !by_Oliver new_KappaM ,& ! new_KappaH ,& ! new_tke ,& ! @@ -351,6 +377,7 @@ subroutine tke_wrap(Vmix_vars, Vmix_params, tke_userdef_constants) tke_Ttot = tke_Ttot, & tke_Lmix = tke_Lmix, & tke_Pr = tke_Pr, & + tke_plc = tke_plc, & !by_Oliver ! debugging cvmix_int_1 = cvmix_int_1, & cvmix_int_2 = cvmix_int_2, & @@ -414,6 +441,7 @@ subroutine integrate_tke( & !tke, & tke_Lmix, & ! diagnostic tke_Pr, & ! diagnostic + tke_plc, & ! langmuir turbulence forc_tke_surf, & E_iw, & dtime, & @@ -427,6 +455,7 @@ subroutine integrate_tke( & grav, & ! FIXME: today: put to initialize alpha_c, & ! FIXME: today: put to initialize tke_userdef_constants) +!subroutine integrate_tke(jc, blockNo, tstep_count) type(tke_type), intent(in), optional, target :: & tke_userdef_constants @@ -450,9 +479,13 @@ subroutine integrate_tke( & real(cvmix_r8), dimension(nlev), intent(in) :: & dzw ! + + ! Langmuir turbulence + real(cvmix_r8), dimension(nlev+1), intent(in), optional :: & + tke_plc ! 
IDEMIX variables, if run coupled iw_diss is added as forcing to TKE - real(cvmix_r8), dimension(max_nlev), intent(in), optional :: & + real(cvmix_r8), dimension(nlev+1), intent(in), optional :: & E_iw ,& ! alpha_c ,& ! iw_diss ! @@ -468,7 +501,7 @@ subroutine integrate_tke( & real(cvmix_r8), intent(in) :: & forc_tke_surf - !real(cvmix_r8),dimension(nlev+1), intent(in), optional :: & + !real(cvmix_r8),dimension(max_nlev+1), intent(in), optional :: & ! Kappa_GM ! ! NEW values @@ -479,7 +512,7 @@ subroutine integrate_tke( & KappaH_out ! diagnostics - real(cvmix_r8), dimension(nlev+1) :: & + real(cvmix_r8), dimension(nlev+1), intent(out) :: & tke_Tbpr ,& tke_Tspr ,& tke_Tdif ,& @@ -491,6 +524,7 @@ subroutine integrate_tke( & !tke ,& tke_Lmix ,& tke_Pr !,& + real(cvmix_r8), dimension(nlev+1), intent(out) :: & cvmix_int_1 ,& cvmix_int_2 ,& @@ -519,11 +553,12 @@ subroutine integrate_tke( & KappaM_max ,& ! mxl_min ,& ! {1e-8} c_k ,& ! {0.1} + clc ,& ! {0.15} tke_surf_min ,& ! {1e-4} tke_min ! {1e-6} integer :: tke_mxl_choice - logical :: only_tke, use_ubound_dirichlet, use_lbound_dirichlet + logical :: only_tke, use_ubound_dirichlet, use_lbound_dirichlet,l_lc real(cvmix_r8) :: & zzw ,& ! depth of interface k @@ -556,7 +591,7 @@ subroutine integrate_tke( & tke_constants_in => tke_userdef_constants end if - ! FIXME: nils: What should we do with height of last grid box dzt(nlev+1)? + ! FIXME: nils: What should we do with height of last grid box dzt(max_nlev+1)? ! This should not be as thick as the distance to the next tracer ! point (which is a dry point). ! Be careful if you divide by 0.5 here. 
Maybe later we use ddpo @@ -571,6 +606,20 @@ subroutine integrate_tke( & tke_Tiwf = 0.0 tke_Tbck = 0.0 tke_Ttot = 0.0 + cvmix_int_1 = 0.0 + cvmix_int_2 = 0.0 + cvmix_int_3 = 0.0 + + tke_new = 0.0 + tke_upd = 0.0 + tke_surf= 0.0 + + a_dif = 0.0 + b_dif = 0.0 + c_dif = 0.0 + a_tri = 0.0 + b_tri = 0.0 + c_tri = 0.0 !--------------------------------------------------------------------------------- ! set tke_constants locally @@ -586,9 +635,25 @@ subroutine integrate_tke( & tke_surf_min = tke_constants_in%tke_surf_min tke_mxl_choice = tke_constants_in%tke_mxl_choice only_tke = tke_constants_in%only_tke + l_lc = tke_constants_in%l_lc + clc = tke_constants_in%clc use_ubound_dirichlet = tke_constants_in%use_ubound_dirichlet use_lbound_dirichlet = tke_constants_in%use_lbound_dirichlet + !c_k = 0.1 + !c_eps = 0.7 + !alpha_tke = 30.0 + !mxl_min = 1.d-8 + !kappaM_min = 0.0 + !kappaM_max = 100.0 + !cd = 3.75 + !tke_min = 1.d-6 + !tke_mxl_choice = 2 + !tke_surf_min = 1.d-4 + !only_tke = .true. + !use_ubound_dirichlet = .false. + !use_lbound_dirichlet = .false. + ! FIXME: nils: Is kappaM_min ever used? ! FIXME: use kappaM_min from namelist ! FIXME: where is kappaM_min used? @@ -636,8 +701,6 @@ subroutine integrate_tke( & !--------------------------------------------------------------------------------- ! see. Blanke and Delecluse 1993, eq. 2.25 KappaM_out = min(KappaM_max,c_k*mxl*sqrttke) - - ! Richardson number --> see. Blanke and Delecluse 1993, eq. 2.18 Rinum = Nsqr/max(Ssqr,1d-12) ! FIXME: nils: Check this later if IDEMIX is coupled. @@ -669,6 +732,11 @@ subroutine integrate_tke( & P_diss_v(1) = -forc_rho_surf*grav/rho_ref forc = forc + K_diss_v - P_diss_v + ! --- additional langmuir turbulence term + if (l_lc) then + forc = forc + tke_plc + endif + ! --- forcing by internal wave dissipation if (.not.only_tke) then forc = forc + iw_diss @@ -705,7 +773,7 @@ subroutine integrate_tke( & a_dif(1) = 0.d0 ! not part of the diffusion matrix, thus value is arbitrary ! 
copy tke_old - tke_upd = tke_old + tke_upd(1:nlev+1) = tke_old(1:nlev+1) ! upper boundary condition if (use_ubound_dirichlet) then @@ -796,7 +864,7 @@ subroutine integrate_tke( & ! restrict values of TKE to tke_min, if IDEMIX is not used if (only_tke) then - tke_new = MAX(tke_new, tke_min) + tke_new(1:nlev+1) = MAX(tke_new(1:nlev+1), tke_min) end if !--------------------------------------------------------------------------------- @@ -804,8 +872,8 @@ subroutine integrate_tke( & !--------------------------------------------------------------------------------- ! tke_Ttot = tke_Tbpr + tke_Tspr + tke_Tdif + tke_Tdis ! + tke_Twin + tke_Tiwf - tke_Tbpr = -P_diss_v - tke_Tspr = K_diss_v + tke_Tbpr(1:nlev+1) = -P_diss_v(1:nlev+1) + tke_Tspr(1:nlev+1) = K_diss_v(1:nlev+1) !tke_Tdif is set above !tke_Tdis = -tke_diss_out tke_Tbck = (tke_new-tke_unrest)/dtime @@ -826,92 +894,93 @@ subroutine integrate_tke( & tke_Twin(nlev+1) = 0.0 endif - tke_Tiwf = iw_diss + tke_Tiwf(1:nlev+1) = iw_diss(1:nlev+1) tke_Ttot = (tke_new-tke_old)/dtime !tke = tke_new - tke_Lmix = mxl - tke_Pr = prandtl + tke_Lmix(nlev+1:) = 0.0 + tke_Lmix(1:nlev+1) = mxl(1:nlev+1) + tke_Pr(nlev+1:) = 0.0 + tke_Pr(1:nlev+1) = prandtl(1:nlev+1) ! ----------------------------------------------- ! the rest is for debugging ! ----------------------------------------------- - cvmix_int_1 = KappaM_out - cvmix_int_2 = 0.0 - cvmix_int_2(1) = tke_surf + cvmix_int_1 = KappaH_out + cvmix_int_2 = KappaM_out cvmix_int_3 = Nsqr !cvmix_int_1 = forc !cvmix_int_2 = Nsqr !cvmix_int_3 = Ssqr if (.false.) then - ! write(*,*) 'i = ', i, 'j = ', j, 'tstep_count = ', tstep_count - if (i==45 .and. j==10) then + write(*,*) 'i = ', i, 'j = ', j, 'tstep_count = ', tstep_count + if (i==8 .and. j==10) then !if (i==45 .and. j==10 .and. tstep_count==10) then ! 
----------------------------------------------- write(*,*) '================================================================================' write(*,*) 'i = ', i, 'j = ', j, 'tstep_count = ', tstep_count - write(*,*) 'nlev = ', nlev - write(*,*) 'dtime = ', dtime - write(*,*) 'dzt = ', dzt - write(*,*) 'dzw = ', dzw - write(*,*) 'Nsqr = ', Nsqr - write(*,*) 'Ssqr = ', Ssqr - !write(*,*) 'tho = ', tho(i,j,1:nlev) - !write(*,*) 'sao = ', sao(i,j,1:nlev) - !write(*,*) 'bottom_fric = ', bottom_fric - !write(*,*) 'forc_tke_surf = ', forc_tke_surf +!!! write(*,*) 'nlev = ', nlev +!!! write(*,*) 'dtime = ', dtime +!!! write(*,*) 'dzt = ', dzt +!!! write(*,*) 'dzw = ', dzw +!!! write(*,*) 'Nsqr = ', Nsqr +!!! write(*,*) 'Ssqr = ', Ssqr +!!! !write(*,*) 'tho = ', tho(i,j,1:nlev) +!!! !write(*,*) 'sao = ', sao(i,j,1:nlev) +!!! !write(*,*) 'bottom_fric = ', bottom_fric +!!! !write(*,*) 'forc_tke_surf = ', forc_tke_surf write(*,*) 'sqrttke = ', sqrttke - write(*,*) 'mxl = ', mxl - write(*,*) 'KappaM_out = ', KappaM_out - write(*,*) 'KappaH_out = ', KappaH_out - write(*,*) 'forc = ', forc - !write(*,*) 'Rinum = ', Rinum - write(*,*) 'prandtl = ', prandtl - !write(*,*) 'checkpoint d_tri' - !write(*,*) 'K_diss_v = ', K_diss_v - !write(*,*) 'P_diss_v = ', P_diss_v - !write(*,*) 'delta = ', delta - write(*,*) 'ke = ', ke - write(*,*) 'a_tri = ', a_tri - write(*,*) 'b_tri = ', b_tri - write(*,*) 'c_tri = ', c_tri - write(*,*) 'd_tri = ', d_tri - !write(*,*) 'tke_old = ', tke_old - write(*,*) 'tke_new = ', tke_new - write(*,*) 'tke_Tbpr = ', tke_Tbpr - write(*,*) 'tke_Tspr = ', tke_Tspr - write(*,*) 'tke_Tdif = ', tke_Tdif - write(*,*) 'tke_Tdis = ', tke_Tdis - write(*,*) 'tke_Twin = ', tke_Twin - write(*,*) 'tke_Tiwf = ', tke_Tiwf - write(*,*) 'tke_Ttot = ', tke_Ttot - write(*,*) 'tke_Ttot - tke_Tsum = ', & - tke_Ttot-(tke_Tbpr+tke_Tspr+tke_Tdif+tke_Tdis+tke_Twin+tke_Tiwf) - !write(*,*) 'dzw = ', dzw - !write(*,*) 'dzt = ', dzt - ! FIXME: partial bottom cells!! - ! 
namelist parameters - write(*,*) 'c_k = ', c_k - write(*,*) 'c_eps = ', c_eps - write(*,*) 'alpha_tke = ', alpha_tke - write(*,*) 'mxl_min = ', mxl_min - write(*,*) 'kappaM_min = ', kappaM_min - write(*,*) 'kappaM_max = ', kappaM_max - ! FIXME: Make tke_mxl_choice available! - !write(*,*) 'tke_mxl_choice = ', tke_mxl_choice - !write(*,*) 'cd = ', cd - write(*,*) 'tke_min = ', tke_min - write(*,*) 'tke_surf_min = ', tke_surf_min - write(*,*) 'only_tke = ', only_tke - write(*,*) 'use_ubound_dirichlet = ', use_ubound_dirichlet - write(*,*) 'use_lbound_dirichlet = ', use_lbound_dirichlet - !write(*,*) 'tke(nlev) = ', tke(nlev), 'tke(nlev+1) = ', tke(nlev+1) - !write(*,*) 'tke(nlev+2) = ', tke(nlev+2) +!!! write(*,*) 'mxl = ', mxl +!!! write(*,*) 'KappaM_out = ', KappaM_out +!!! write(*,*) 'KappaH_out = ', KappaH_out +!!! write(*,*) 'forc = ', forc +!!! !write(*,*) 'Rinum = ', Rinum +!!! write(*,*) 'prandtl = ', prandtl +!!! !write(*,*) 'checkpoint d_tri' +!!! !write(*,*) 'K_diss_v = ', K_diss_v +!!! !write(*,*) 'P_diss_v = ', P_diss_v +!!! !write(*,*) 'delta = ', delta +!!! write(*,*) 'ke = ', ke +!!! write(*,*) 'a_tri = ', a_tri +!!! write(*,*) 'b_tri = ', b_tri +!!! write(*,*) 'c_tri = ', c_tri +!!! write(*,*) 'd_tri = ', d_tri +!!! !write(*,*) 'tke_old = ', tke_old +!!! write(*,*) 'tke_new = ', tke_new +!!! write(*,*) 'tke_Tbpr = ', tke_Tbpr +!!! write(*,*) 'tke_Tspr = ', tke_Tspr +!!! write(*,*) 'tke_Tdif = ', tke_Tdif +!!! write(*,*) 'tke_Tdis = ', tke_Tdis +!!! write(*,*) 'tke_Twin = ', tke_Twin +!!! write(*,*) 'tke_Tiwf = ', tke_Tiwf +!!! write(*,*) 'tke_Ttot = ', tke_Ttot +!!! write(*,*) 'tke_Ttot - tke_Tsum = ', & +!!! tke_Ttot-(tke_Tbpr+tke_Tspr+tke_Tdif+tke_Tdis+tke_Twin+tke_Tiwf) +!!! !write(*,*) 'dzw = ', dzw +!!! !write(*,*) 'dzt = ', dzt +!!! ! FIXME: partial bottom cells!! +!!! ! namelist parameters +!!! write(*,*) 'c_k = ', c_k +!!! write(*,*) 'c_eps = ', c_eps +!!! write(*,*) 'alpha_tke = ', alpha_tke +!!! write(*,*) 'mxl_min = ', mxl_min +!!! 
write(*,*) 'kappaM_min = ', kappaM_min +!!! write(*,*) 'kappaM_max = ', kappaM_max +!!! ! FIXME: Make tke_mxl_choice available! +!!! !write(*,*) 'tke_mxl_choice = ', tke_mxl_choice +!!! !write(*,*) 'cd = ', cd +!!! write(*,*) 'tke_min = ', tke_min +!!! write(*,*) 'tke_surf_min = ', tke_surf_min +!!! write(*,*) 'only_tke = ', only_tke +!!! write(*,*) 'use_ubound_dirichlet = ', use_ubound_dirichlet +!!! write(*,*) 'use_lbound_dirichlet = ', use_lbound_dirichlet +!!! !write(*,*) 'tke(nlev) = ', tke(nlev), 'tke(nlev+1) = ', tke(nlev+1) +!!! !write(*,*) 'tke(nlev+2) = ', tke(nlev+2) write(*,*) '================================================================================' !end if !if (i==45 .and. j==10 .and. tstep_count==10) then - !stop +! stop end if ! if (i==, j==, tstep==) end if ! if (.true./.false.) end subroutine integrate_tke @@ -965,7 +1034,9 @@ subroutine cvmix_tke_put_tke_logical(varname,val,tke_userdef_constants) case('use_ubound_dirichlet') tke_constants_out%use_ubound_dirichlet=val case('use_lbound_dirichlet') - tke_constants_out%use_lbound_dirichlet=val + tke_constants_out%use_lbound_dirichlet=val + case('l_lc') + tke_constants_out%l_lc=val case DEFAULT print*, "ERROR:", trim(varname), " not a valid choice" stop 1 @@ -1007,7 +1078,9 @@ subroutine cvmix_tke_put_tke_real(varname,val,tke_userdef_constants) case('kappaM_max') tke_constants_out%kappaM_max = val case('tke_min') - tke_constants_out%tke_min = val + tke_constants_out%tke_min = val + case('clc') + tke_constants_out%clc = val case('tke_surf_min') tke_constants_out%tke_surf_min = val case DEFAULT diff --git a/src/fesom_main.F90 b/src/fesom_main.F90 new file mode 100755 index 000000000..562fb6e1b --- /dev/null +++ b/src/fesom_main.F90 @@ -0,0 +1,18 @@ +!=============================================================================! +! +! Finite Volume Sea-ice Ocean Model +! +!=============================================================================! +! 
The main driving routine +!=============================================================================! + +program main + use fesom_module + + integer nsteps + + call fesom_init(nsteps) + call fesom_runloop(nsteps) + call fesom_finalize + +end program diff --git a/src/fesom_module.F90 b/src/fesom_module.F90 new file mode 100755 index 000000000..c66d1a9e9 --- /dev/null +++ b/src/fesom_module.F90 @@ -0,0 +1,474 @@ +! synopsis: save any derived types we initialize +! so they can be reused after fesom_init +module fesom_main_storage_module + USE MOD_MESH + USE MOD_ICE + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + USE o_ARRAYS + USE o_PARAM + use g_clock + use g_config + use g_comm_auto + use g_forcing_arrays + use io_RESTART + use io_MEANDATA + use io_mesh_info + use diagnostics + use mo_tidal + use tracer_init_interface + use ocean_setup_interface + use ice_setup_interface + use ocean2ice_interface + use oce_fluxes_interface + use update_atm_forcing_interface + use before_oce_step_interface + use oce_timestep_ale_interface + use read_mesh_interface + use fesom_version_info_module + use command_line_options_module + ! Define icepack module +#if defined (__icepack) + use icedrv_main, only: set_icepack, init_icepack, alloc_icepack +#endif + +#if defined (__oasis) + use cpl_driver +#endif + + implicit none + + type :: fesom_main_storage_type + + integer :: n, from_nstep, offset, row, i, provided + integer :: which_readr ! 
read which restart files (0=netcdf, 1=core dump,2=dtype) + integer, pointer :: mype, npes, MPIerr, MPI_COMM_FESOM + real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc + real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing + real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing + real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart + real(kind=real32) :: runtime_alltimesteps + + + type(t_mesh) mesh + type(t_tracer) tracers + type(t_dyn) dynamics + type(t_partit) partit + type(t_ice) ice + + + character(LEN=256) :: dump_dir, dump_filename + logical :: L_EXISTS + type(t_mesh) mesh_copy + type(t_tracer) tracers_copy + type(t_dyn) dynamics_copy + type(t_ice) ice_copy + + character(LEN=MPI_MAX_LIBRARY_VERSION_STRING) :: mpi_version_txt + integer mpi_version_len + logical fesom_did_mpi_init + + end type + type(fesom_main_storage_type), save, target :: f + +end module + + +! synopsis: main FESOM program split into 3 parts +! this way FESOM can e.g. be used as a library with an external time loop driver +! used with IFS-FESOM +module fesom_module + implicit none + public fesom_init, fesom_runloop, fesom_finalize + private + +contains + + subroutine fesom_init(fesom_total_nsteps) + use fesom_main_storage_module + integer, intent(out) :: fesom_total_nsteps + ! EO parameters + logical mpi_is_initialized + +#if !defined __ifsinterface + if(command_argument_count() > 0) then + call command_line_options%parse() + stop + end if +#endif + + mpi_is_initialized = .false. + f%fesom_did_mpi_init = .false. + +#ifndef __oifs + !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) + !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS + call MPI_Initialized(mpi_is_initialized, f%i) + if(.not. mpi_is_initialized) then + ! 
do not initialize MPI here if it has been initialized already, e.g. via IFS when fesom is called as library (__ifsinterface is defined) + call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, f%provided, f%i) + f%fesom_did_mpi_init = .true. + end if +#endif + + +#if defined (__oasis) + call cpl_oasis3mct_init(f%partit,f%partit%MPI_COMM_FESOM) +#endif + f%t1 = MPI_Wtime() + + call par_init(f%partit) + + f%mype =>f%partit%mype + f%MPIerr =>f%partit%MPIerr + f%MPI_COMM_FESOM=>f%partit%MPI_COMM_FESOM + f%npes =>f%partit%npes + if(f%mype==0) then + write(*,*) + print *,"FESOM2 git SHA: "//fesom_git_sha() + call MPI_Get_library_version(f%mpi_version_txt, f%mpi_version_len, f%MPIERR) + print *,"MPI library version: "//trim(f%mpi_version_txt) + print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;32m'//' --> FESOM BUILDS UP MODEL CONFIGURATION '//achar(27)//'[0m' + end if + !===================== + ! Read configuration data, + ! load the mesh and fill in + ! auxiliary mesh arrays + !===================== + call setup_model(f%partit) ! Read Namelists, always before clock_init + call clock_init(f%partit) ! read the clock file + call get_run_steps(fesom_total_nsteps, f%partit) + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call mesh_setup'//achar(27)//'[0m' + call mesh_setup(f%partit, f%mesh) + + if (f%mype==0) write(*,*) 'FESOM mesh_setup... complete' + + !===================== + ! Allocate field variables + ! and additional arrays needed for + ! fancy advection etc. + !===================== + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call check_mesh_consistency'//achar(27)//'[0m' + call check_mesh_consistency(f%partit, f%mesh) + if (f%mype==0) f%t2=MPI_Wtime() + + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call dynamics_init'//achar(27)//'[0m' + call dynamics_init(f%dynamics, f%partit, f%mesh) + + if (flag_debug .and. 
f%mype==0) print *, achar(27)//'[34m'//' --> call tracer_init'//achar(27)//'[0m' + call tracer_init(f%tracers, f%partit, f%mesh) ! allocate array of ocean tracers (derived type "t_tracer") + + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call arrays_init'//achar(27)//'[0m' + call arrays_init(f%tracers%num_tracers, f%partit, f%mesh) ! allocate other arrays (to be refactured same as tracers in the future) + + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean_setup'//achar(27)//'[0m' + call ocean_setup(f%dynamics, f%tracers, f%partit, f%mesh) + + if (f%mype==0) then + write(*,*) 'FESOM ocean_setup... complete' + f%t3=MPI_Wtime() + endif + call forcing_setup(f%partit, f%mesh) + + if (f%mype==0) f%t4=MPI_Wtime() + if (use_ice) then + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ice_setup'//achar(27)//'[0m' + call ice_setup(f%ice, f%tracers, f%partit, f%mesh) + f%ice%ice_steps_since_upd = f%ice%ice_ave_steps-1 + f%ice%ice_update=.true. + if (f%mype==0) write(*,*) 'EVP scheme option=', f%ice%whichEVP + else + ! create a dummy ice derived type with only a_ice, m_ice, m_snow and + ! uvice since oce_timesteps still needs in moment + ! ice as an input for mo_convect(ice, partit, mesh), call + ! compute_vel_rhs(ice, dynamics, partit, mesh), + ! call write_step_info(...) and call check_blowup(...) + call ice_init_toyocean_dummy(f%ice, f%partit, f%mesh) + endif + + if (f%mype==0) f%t5=MPI_Wtime() + call compute_diagnostics(0, f%dynamics, f%tracers, f%partit, f%mesh) ! allocate arrays for diagnostic +#if defined (__oasis) + call cpl_oasis3mct_define_unstr(f%partit, f%mesh) + if(f%mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv +#endif + +#if defined (__icepack) + !===================== + ! 
Setup icepack + !===================== + if (f%mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' + call set_icepack(f%ice, f%partit) + call alloc_icepack + call init_icepack(f%ice, f%tracers%data(1), f%mesh) + if (f%mype==0) write(*,*) 'Icepack: setup complete' +#endif + call clock_newyear ! check if it is a new year + if (f%mype==0) f%t6=MPI_Wtime() + !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ + call restart(0, r_restart, f%which_readr, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) + if (f%mype==0) f%t7=MPI_Wtime() + ! store grid information into netcdf file + if (.not. r_restart) call write_mesh_info(f%partit, f%mesh) + + !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ + !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ + if (r_restart .and. .not. f%which_readr==2) then + call restart_thickness_ale(f%partit, f%mesh) + end if + if (f%mype==0) then + f%t8=MPI_Wtime() + + f%rtime_setup_mesh = real( f%t2 - f%t1 ,real32) + f%rtime_setup_ocean = real( f%t3 - f%t2 ,real32) + f%rtime_setup_forcing = real( f%t4 - f%t3 ,real32) + f%rtime_setup_ice = real( f%t5 - f%t4 ,real32) + f%rtime_setup_restart = real( f%t7 - f%t6 ,real32) + f%rtime_setup_other = real((f%t8 - f%t7) + (f%t6 - f%t5) ,real32) + + write(*,*) '==========================================' + write(*,*) 'MODEL SETUP took on mype=0 [seconds] ' + write(*,*) 'runtime setup total ',real(f%t8-f%t1,real32) + write(*,*) ' > runtime setup mesh ',f%rtime_setup_mesh + write(*,*) ' > runtime setup ocean ',f%rtime_setup_ocean + write(*,*) ' > runtime setup forcing ',f%rtime_setup_forcing + write(*,*) ' > runtime setup ice ',f%rtime_setup_ice + write(*,*) ' > runtime setup restart ',f%rtime_setup_restart + write(*,*) ' > runtime setup other ',f%rtime_setup_other + write(*,*) '============================================' + endif + + ! f%dump_dir='DUMP/' + ! INQUIRE(file=trim(f%dump_dir), EXIST=f%L_EXISTS) + ! 
if (.not. f%L_EXISTS) call system('mkdir '//trim(f%dump_dir)) + + ! write (f%dump_filename, "(A7,I7.7)") "t_mesh.", f%mype + ! open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + ! write (f%mype+300) f%mesh + ! close (f%mype+300) + + ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") + ! read (f%mype+300) f%mesh_copy + ! close (f%mype+300) + + ! write (f%dump_filename, "(A9,I7.7)") "t_tracer.", f%mype + ! open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + ! write (f%mype+300) f%tracers + ! close (f%mype+300) + + ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") + ! read (f%mype+300) f%dynamics_copy + ! close (f%mype+300) + + ! write (f%dump_filename, "(A9,I7.7)") "t_dynamics.", f%mype + ! open (f%mype+300, file=TRIM(f%dump_dir)//trim(f%dump_filename), status='replace', form="unformatted") + ! write (f%mype+300) f%dynamics + ! close (f%mype+300) + + ! open (f%mype+300, file=trim(f%dump_filename), status='old', form="unformatted") + ! read (f%mype+300) f%tracers_copy + ! close (f%mype+300) + + !call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) + !stop + ! + ! if (f%mype==10) write(,) f%mesh1%ssh_stiff%values-f%mesh%ssh_stiff%value + + ! Initialize timers + f%rtime_fullice = 0._WP + f%rtime_write_restart = 0._WP + f%rtime_write_means = 0._WP + f%rtime_compute_diag = 0._WP + f%rtime_read_forcing = 0._WP + + f%from_nstep = 1 + end subroutine + + + subroutine fesom_runloop(current_nsteps) + use fesom_main_storage_module + integer, intent(in) :: current_nsteps + ! EO parameters + integer n + + !===================== + ! Time stepping + !===================== + + if (f%mype==0) write(*,*) 'FESOM start iteration before the barrier...' + call MPI_Barrier(f%MPI_COMM_FESOM, f%MPIERR) + + if (f%mype==0) then + write(*,*) 'FESOM start iteration after the barrier...' 
+ f%t0 = MPI_Wtime() + endif + if(f%mype==0) then + write(*,*) + print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;32m'//' --> FESOM STARTS TIME LOOP '//achar(27)//'[0m' + end if + !___MODEL TIME STEPPING LOOP________________________________________________ + if (use_global_tides) then + call foreph_ini(yearnew, month, f%partit) + end if + do n=f%from_nstep, f%from_nstep-1+current_nsteps + if (use_global_tides) then + call foreph(f%partit, f%mesh) + end if + mstep = n + if (mod(n,logfile_outfreq)==0 .and. f%mype==0) then + write(*,*) 'FESOM =======================================================' +! write(*,*) 'FESOM step:',n,' day:', n*dt/24./3600., + write(*,*) 'FESOM step:',n,' day:', daynew,' year:',yearnew + write(*,*) + end if +#if defined (__oifs) || defined (__oasis) + seconds_til_now=INT(dt)*(n-1) +#endif + call clock + !___compute horizontal velocity on nodes (originaly on elements)________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call compute_vel_nodes'//achar(27)//'[0m' + call compute_vel_nodes(f%dynamics, f%partit, f%mesh) + + !___model sea-ice step__________________________________________________ + f%t1 = MPI_Wtime() + if(use_ice) then + !___compute fluxes from ocean to ice________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' + call ocean2ice(f%ice, f%dynamics, f%tracers, f%partit, f%mesh) + + !___compute update of atmospheric forcing____________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' + f%t0_frc = MPI_Wtime() + call update_atm_forcing(n, f%ice, f%tracers, f%dynamics, f%partit, f%mesh) + f%t1_frc = MPI_Wtime() + !___compute ice step________________________________________________ + if (f%ice%ice_steps_since_upd>=f%ice%ice_ave_steps-1) then + f%ice%ice_update=.true. 
+ f%ice%ice_steps_since_upd = 0 + else + f%ice%ice_update=.false. + f%ice%ice_steps_since_upd=f%ice%ice_steps_since_upd+1 + endif + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call ice_timestep(n)'//achar(27)//'[0m' + if (f%ice%ice_update) call ice_timestep(n, f%ice, f%partit, f%mesh) + !___compute fluxes to the ocean: heat, freshwater, momentum_________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' + call oce_fluxes_mom(f%ice, f%dynamics, f%partit, f%mesh) ! momentum only + call oce_fluxes(f%ice, f%dynamics, f%tracers, f%partit, f%mesh) + end if + call before_oce_step(f%dynamics, f%tracers, f%partit, f%mesh) ! prepare the things if required + f%t2 = MPI_Wtime() + + !___model ocean step____________________________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' + call oce_timestep_ale(n, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) + + f%t3 = MPI_Wtime() + !___compute energy diagnostics..._______________________________________ + if (flag_debug .and. f%mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' + call compute_diagnostics(1, f%dynamics, f%tracers, f%partit, f%mesh) + + f%t4 = MPI_Wtime() + !___prepare output______________________________________________________ + if (flag_debug .and. 
f%mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' + call output (n, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) + + f%t5 = MPI_Wtime() + call restart(n, .false., f%which_readr, f%ice, f%dynamics, f%tracers, f%partit, f%mesh) + f%t6 = MPI_Wtime() + + f%rtime_fullice = f%rtime_fullice + f%t2 - f%t1 + f%rtime_compute_diag = f%rtime_compute_diag + f%t4 - f%t3 + f%rtime_write_means = f%rtime_write_means + f%t5 - f%t4 + f%rtime_write_restart = f%rtime_write_restart + f%t6 - f%t5 + f%rtime_read_forcing = f%rtime_read_forcing + f%t1_frc - f%t0_frc + end do + + f%from_nstep = f%from_nstep+current_nsteps + end subroutine + + + subroutine fesom_finalize() + use fesom_main_storage_module + ! EO parameters + real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) + + call finalize_output() + call finalize_restart() + + !___FINISH MODEL RUN________________________________________________________ + + call MPI_Barrier(f%MPI_COMM_FESOM, f%MPIERR) + if (f%mype==0) then + f%t1 = MPI_Wtime() + f%runtime_alltimesteps = real(f%t1-f%t0,real32) + write(*,*) 'FESOM Run is finished, updating clock' + endif + + mean_rtime(1) = rtime_oce + mean_rtime(2) = rtime_oce_mixpres + mean_rtime(3) = rtime_oce_dyn + mean_rtime(4) = rtime_oce_dynssh + mean_rtime(5) = rtime_oce_solvessh + mean_rtime(6) = rtime_oce_GMRedi + mean_rtime(7) = rtime_oce_solvetra + mean_rtime(8) = rtime_ice + mean_rtime(9) = rtime_tot + mean_rtime(10) = f%rtime_fullice - f%rtime_read_forcing + mean_rtime(11) = f%rtime_compute_diag + mean_rtime(12) = f%rtime_write_means + mean_rtime(13) = f%rtime_write_restart + mean_rtime(14) = f%rtime_read_forcing + + max_rtime(1:14) = mean_rtime(1:14) + min_rtime(1:14) = mean_rtime(1:14) + + call MPI_AllREDUCE(MPI_IN_PLACE, mean_rtime, 14, MPI_REAL, MPI_SUM, f%MPI_COMM_FESOM, f%MPIerr) + mean_rtime(1:14) = mean_rtime(1:14) / real(f%npes,real32) + call MPI_AllREDUCE(MPI_IN_PLACE, max_rtime, 14, MPI_REAL, MPI_MAX, f%MPI_COMM_FESOM, f%MPIerr) + call 
MPI_AllREDUCE(MPI_IN_PLACE, min_rtime, 14, MPI_REAL, MPI_MIN, f%MPI_COMM_FESOM, f%MPIerr) + + if(f%fesom_did_mpi_init) call par_ex(f%partit%MPI_COMM_FESOM, f%partit%mype) ! finalize MPI before FESOM prints its stats block, otherwise there is sometimes output from other processes from an earlier time in the programm AFTER the starts block (with parastationMPI) + if (f%mype==0) then + 41 format (a35,a10,2a15) !Format for table heading + 42 format (a30,3f15.4) !Format for table content + + print 41, '___MODEL RUNTIME per task [seconds]','_____mean_','___________min_', '___________max_' + print 42, ' runtime ocean: ', mean_rtime(1), min_rtime(1), max_rtime(1) + print 42, ' > runtime oce. mix,pres. :', mean_rtime(2), min_rtime(2), max_rtime(2) + print 42, ' > runtime oce. dyn. u,v,w:', mean_rtime(3), min_rtime(3), max_rtime(3) + print 42, ' > runtime oce. dyn. ssh :', mean_rtime(4), min_rtime(4), max_rtime(4) + print 42, ' > runtime oce. solve ssh :', mean_rtime(5), min_rtime(5), max_rtime(5) + print 42, ' > runtime oce. GM/Redi :', mean_rtime(6), min_rtime(6), max_rtime(6) + print 42, ' > runtime oce. 
tracer :', mean_rtime(7), min_rtime(7), max_rtime(7) + print 42, ' runtime ice : ', mean_rtime(10), min_rtime(10), max_rtime(10) + print 42, ' > runtime ice step : ', mean_rtime(8), min_rtime(8), max_rtime(8) + print 42, ' runtime diag: ', mean_rtime(11), min_rtime(11), max_rtime(11) + print 42, ' runtime output: ', mean_rtime(12), min_rtime(12), max_rtime(12) + print 42, ' runtime restart: ', mean_rtime(13), min_rtime(13), max_rtime(13) + print 42, ' runtime forcing: ', mean_rtime(14), min_rtime(14), max_rtime(14) + print 42, ' runtime total (ice+oce): ', mean_rtime(9), min_rtime(9), max_rtime(9) + + 43 format (a33,i15) !Format Ncores + 44 format (a33,i15) !Format OMP threads + 45 format (a33,f15.4,a4) !Format runtime + + write(*,*) + write(*,*) '======================================================' + write(*,*) '================ BENCHMARK RUNTIME ===================' + print 43, ' Number of cores : ',f%npes +#if defined(_OPENMP) + print 44, ' Max OpenMP threads : ',OMP_GET_MAX_THREADS() +#endif + print 45, ' Runtime for all timesteps : ',f%runtime_alltimesteps,' sec' + write(*,*) '======================================================' + write(*,*) + end if +! 
call clock_finish + end subroutine + +end module diff --git a/src/fort_part.c b/src/fort_part.c index 837596771..9d903a73d 100644 --- a/src/fort_part.c +++ b/src/fort_part.c @@ -44,7 +44,7 @@ #if METIS_VERSION == 5 /* ---------------- METIS 5 part ------------------------ */ #include "metis.h" -void partit(idx_t *n, idx_t *ptr, idx_t *adj, idx_t *wgt, idx_t *np, idx_t *part) +void do_partit(idx_t *n, idx_t *ptr, idx_t *adj, idx_t *wgt, idx_t *np, idx_t *part) { int i, j, wgt_type; idx_t opt[METIS_NOPTIONS]; @@ -278,7 +278,7 @@ void partit(idx_t *n, idx_t *ptr, idx_t *adj, idx_t *wgt, idx_t *np, idx_t *part wgt_loc[vertex_loc]=wgt[j]; } - partit(&n_loc, ptr_loc, adj_loc, wgt_loc, np, part_loc); + do_partit(&n_loc, ptr_loc, adj_loc, wgt_loc, np, part_loc); /* Convert the partitioned graph back to the current level indexing */ for (j=0; j<*n; j++) @@ -302,7 +302,7 @@ void partit(idx_t *n, idx_t *ptr, idx_t *adj, idx_t *wgt, idx_t *np, idx_t *part #elif METIS_VERSION == 4 /* ---------------- METIS 4 part ------------------------ */ -void partit(int *n, int *ptr, int *adj, int *wgt, int *np, int *part) +void do_partit(int *n, int *ptr, int *adj, int *wgt, int *np, int *part) { int opt[5]; int numfl=1; /* 0: C-numbering ; 1: F-numbering*/ diff --git a/src/fortran_utils.F90 b/src/fortran_utils.F90 new file mode 100644 index 000000000..1ebd62323 --- /dev/null +++ b/src/fortran_utils.F90 @@ -0,0 +1,83 @@ + ! synopsis: basic Fortran utilities, no MPI, dependencies only to INTRINSIC modules +module fortran_utils + implicit none + +contains + + + function int_to_txt(val) result(txt) + integer, intent(in) :: val + character(:), allocatable :: txt + ! EO parameters + integer val_width + + if(val == 0) then + val_width = 1 + else + val_width = int(log10(real(val)))+1 ! 
does not work for val=0 + end if + allocate(character(val_width) :: txt) + write(txt,'(i0)') val + end function + + + function int_to_txt_pad(val, width) result(txt) + integer, intent(in) :: val, width + character(:), allocatable :: txt + ! EO parameters + integer w, val_width + character(:), allocatable :: widthtxt + + if(val == 0) then + val_width = 1 + else + val_width = int(log10(real(val)))+1 ! does not work for val=0 + end if + w = width + if(w < val_width) w = val_width + widthtxt = int_to_txt(w) + allocate(character(w) :: txt) + write(txt,'(i0.'//widthtxt//')') val + end function + + + function mpirank_to_txt(mpicomm) result(txt) + integer, intent(in) :: mpicomm + character(:), allocatable :: txt + ! EO parameters + integer mype + integer npes + integer mpierr + include 'mpif.h' + + call MPI_Comm_Rank(mpicomm, mype, mpierr) + call MPI_Comm_Size(mpicomm, npes, mpierr) + txt = int_to_txt_pad(mype,int(log10(real(npes)))+1) ! pad to the width of the number of processes + end function + + + ! using EXECUTE_COMMAND_LINE to call mkdir sometimes fail (EXECUTE_COMMAND_LINE is forked as an new process, which may be the problem) + ! try to use the C mkdir as an alternative + subroutine mkdir(path) + use iso_c_binding + character(len=*), intent(in) :: path + ! EO parameters + integer result + character(:,kind=C_CHAR), allocatable :: pathcopy + + interface + function mkdir_c(path, mode) bind(c,name="mkdir") + use iso_c_binding + integer(c_int) mkdir_c + character(kind=c_char,len=1) path(*) + integer(c_int), value :: mode + end function + end interface + + pathcopy = path ! we need to pass an array of c_char to the C funcktion (this is not a correct type conversion, but Fortran characters seem to be of the same kind as c_char) + ! result is 0 if the dir has been created from this call, otherwise -1 + ! 
the mode will not exactly be what we pass here, as it is subtracted by the umask bits (and possibly more) + result = mkdir_c(pathcopy//C_NULL_CHAR, int(o'777', c_int)) + end subroutine + +end module diff --git a/src/fvom_init.F90 b/src/fvom_init.F90 index 80f0ec99e..6a6e8da7e 100755 --- a/src/fvom_init.F90 +++ b/src/fvom_init.F90 @@ -14,8 +14,7 @@ program MAIN use o_PARAM use MOD_MESH - use o_MESH - use g_PARSUP + use MOD_PARTIT use g_CONFIG use g_rotate_grid @@ -59,15 +58,19 @@ subroutine stiff_mat_ini(mesh) end subroutine stiff_mat_ini end interface interface - subroutine set_par_support_ini(mesh) + subroutine set_par_support_ini(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine set_par_support_ini end interface interface - subroutine communication_ini(mesh) + subroutine communication_ini(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + use mod_partit + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine communication_ini end interface @@ -87,7 +90,8 @@ end subroutine find_levels_cavity character(len=MAX_PATH) :: nmlfile !> name of configuration namelist file integer :: start_t, interm_t, finish_t, rate_t - type(t_mesh), target, save :: mesh + type(t_mesh), target, save :: mesh + type(t_partit), target, save :: partit call system_clock(start_t, rate_t) interm_t = start_t @@ -127,12 +131,12 @@ end subroutine find_levels_cavity interm_t = finish_t call stiff_mat_ini(mesh) - call set_par_support_ini(mesh) + call set_par_support_ini(partit, mesh) call system_clock(finish_t) print '("**** Partitioning time = ",f12.3," seconds. 
****")', & real(finish_t-interm_t)/real(rate_t) interm_t = finish_t - call communication_ini(mesh) + call communication_ini(partit, mesh) call system_clock(finish_t) print '("**** Storing partitioned mesh time = ",f12.3," seconds. ****")', & real(finish_t-interm_t)/real(rate_t) @@ -145,7 +149,6 @@ end program MAIN subroutine read_mesh_ini(mesh) USE MOD_MESH USE o_PARAM -USE g_PARSUP use g_CONFIG use g_rotate_grid ! @@ -163,6 +166,9 @@ subroutine read_mesh_ini(mesh) ! =================== ! Surface mesh ! =================== + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: read elem2d.out & nod2d.out '//achar(27)//'[0m' + open (20,file=trim(meshpath)//'nod2d.out', status='old') open (21,file=trim(meshpath)//'elem2d.out', status='old') READ(20,*) mesh%nod2D @@ -198,15 +204,8 @@ subroutine read_mesh_ini(mesh) mesh%elem2D_nodes(1:3,:) = reshape(elem_data, shape(mesh%elem2D_nodes(1:3,:))) mesh%elem2D_nodes(4,:) = mesh%elem2D_nodes(1,:) end if - + deallocate(elem_data) -!!$ do n=1,elem2D -!!$ read(21,*) n1,n2,n3 -!!$ elem2D_nodes(1,n)=n1 -!!$ elem2D_nodes(2,n)=n2 -!!$ elem2D_nodes(3,n)=n3 -!!$ end do - ! CLOSE(21) write(*,*) '=========================' @@ -219,43 +218,48 @@ END SUBROUTINE read_mesh_ini subroutine read_mesh_cavity(mesh) use mod_mesh use o_PARAM - use g_PARSUP use g_CONFIG implicit none type(t_mesh), intent(inout), target :: mesh - integer :: node - character(len=MAX_PATH) :: fname + integer :: node, auxi + character(len=MAX_PATH) :: fname logical :: file_exist=.False. 
#include "associate_mesh_ini.h" !___________________________________________________________________________ - if (mype==0) then - write(*,*) '____________________________________________________________' - write(*,*) ' --> read cavity depth' - end if + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: read cavity depth '//achar(27)//'[0m' !___________________________________________________________________________ ! read depth of cavity-ocean boundary - fname = trim(meshpath)//'cavity_depth.out' + if (use_cavityonelem) then + fname = trim(meshpath)//'cavity_depth@elem.out' + else + fname = trim(meshpath)//'cavity_depth@node.out' + end if file_exist=.False. inquire(file=trim(fname),exist=file_exist) if (file_exist) then open (21,file=fname, status='old') - allocate(mesh%cavity_depth(mesh%nod2D)) + if (use_cavityonelem) then + allocate(mesh%cavity_depth(mesh%elem2d)) + else + allocate(mesh%cavity_depth(mesh%nod2D)) + end if cavity_depth => mesh%cavity_depth else - if (mype==0) then - write(*,*) '____________________________________________________________________' - write(*,*) ' ERROR: could not find cavity file: cavity_depth.out' - write(*,*) ' --> stop partitioning here !' - write(*,*) '____________________________________________________________________' - end if + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: could not find cavity file: cavity_depth.out' + write(*,*) ' --> stop partitioning here !' + write(*,*) '____________________________________________________________________' stop end if !___________________________________________________________________________ - do node=1, mesh%nod2D + auxi=mesh%nod2D + if (use_cavityonelem) auxi=mesh%elem2d + do node=1, auxi read(21,*) mesh%cavity_depth(node) end do @@ -309,9 +313,7 @@ END SUBROUTINE test_tri_ini !> Finds edges. 
Creates 3 files: edgenum.out, edges.out, edge_tri.out SUBROUTINE find_edges_ini(mesh) USE MOD_MESH -USE o_MESH USE o_PARAM -USE g_PARSUP USE g_CONFIG use g_rotate_grid IMPLICIT NONE @@ -320,24 +322,26 @@ SUBROUTINE find_edges_ini(mesh) subroutine elem_center(elem, x, y, mesh) USE MOD_MESH USE g_CONFIG - integer, intent(in) :: elem - real(kind=WP), intent(out) :: x, y - type(t_mesh), intent(in), target :: mesh + integer, intent(in) :: elem + real(kind=WP), intent(out) :: x, y + type(t_mesh), intent(in), target :: mesh end subroutine elem_center end interface -integer, allocatable :: aux1(:), ne_num(:), ne_pos(:,:) +integer, allocatable :: aux1(:), ne_num(:), ne_pos(:,:), nn_num(:), nn_pos(:,:) integer :: counter, counter_in, n, k, q integer :: elem, elem1, elems(2), q1, q2 integer :: elnodes(4), ed(2), flag, eledges(4) integer :: temp(100), node real(kind=WP) :: xc(2), xe(2), ax(3), amin -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" ! ==================== ! (a) find edges. To make the procedure fast ! one needs neighbourhood arrays ! ==================== +print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' +print *, achar(27)//'[7;1m' //' -->: compute edge connectivity '//achar(27)//'[0m' allocate(ne_num(nod2d), ne_pos(MAX_ADJACENT, nod2D), nn_num(nod2D)) ne_num=0 @@ -348,7 +352,6 @@ end subroutine elem_center DO q=1,q1 ne_num(elnodes(q))=ne_num(elnodes(q))+1 if (ne_num(elnodes(q)) > MAX_ADJACENT ) then - print *,'Parameter in o_MESH from ocean_modules.F90, too small.' print *,'Recompile with larger value for MAX_ADJACENT.' 
stop else @@ -646,64 +649,183 @@ END SUBROUTINE find_edges_ini !> Fixes rough topography, by converting some oceans cells to ground cell(reflected by changing levels arrays) !> Creates 2 files: elvls.out, nlvls.out subroutine find_levels(mesh) -use g_config -use mod_mesh -use g_parsup -implicit none -INTEGER :: nodes(3), elems(3), eledges(3) -integer :: elem, elem1, j, n, q, node, enum,count1,count2,exit_flag,i,nz,fileID=111 -real(kind=WP) :: x,dmean -integer :: thers_lev=5 -character(MAX_PATH) :: file_name -type(t_mesh), intent(inout), target :: mesh -#include "associate_mesh_ini.h" + use g_config + use mod_mesh + implicit none + INTEGER :: nodes(3), elems(3), eledges(3) + integer :: elem1, j, n, nneighb, q, node, i, nz, auxi + integer :: count_iter, count_neighb_open, exit_flag, fileID=111 + real(kind=WP) :: x, dmean + logical :: file_exist + integer :: max_iter=1000 + character(MAX_PATH) :: file_name + type(t_mesh), intent(inout), target :: mesh +#include "associate_mesh_ini.h" -ALLOCATE(mesh%depth(nod2D)) -depth => mesh%depth !required after the allocation, otherwise the pointer remains undefined -file_name=trim(meshpath)//'aux3d.out' -open(fileID, file=file_name) -read(fileID,*) nl ! the number of levels -allocate(mesh%zbar(nl)) ! their standard depths -zbar => mesh%zbar !required after the allocation, otherwise the pointer remains undefined -read(fileID,*) zbar -if(zbar(2)>0) zbar=-zbar ! zbar is negative -allocate(mesh%Z(nl-1)) -Z => mesh%Z !required after the allocation, otherwise the pointer remains undefined -Z=zbar(1:nl-1)+zbar(2:nl) ! 
mid-depths of cells -Z=0.5_WP*Z -DO n=1,nod2D - read(fileID,*) x - if (x>0) x=-x - if (x>zbar(thers_lev)) x=zbar(thers_lev) !TODO KK threshholding for depth - depth(n)=x -END DO -close(fileID) + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: read bottom depth '//achar(27)//'[0m' + + !___________________________________________________________________________ + ! allocate depth + if (use_depthonelem) then + allocate(mesh%depth(elem2D)) + else + allocate(mesh%depth(nod2D)) + end if + depth => mesh%depth !required after the allocation, otherwise the pointer remains undefined + + !______________________________________________________________________________ + ! read depth from aux3d.out + if (trim(use_depthfile)=='aux3d') then + ! check if aux3d.out file does exist + file_exist=.False. + file_name=trim(meshpath)//'aux3d.out' + inquire(file=trim(file_name),exist=file_exist) + !_______________________________________________________________________ + if (file_exist) then + write(*," (A, A)") ' read file:',trim(file_name) + !___________________________________________________________________ + ! load fesom2.0 aux3d.out file + open(fileID, file=file_name) + + ! read the number of levels + read(fileID,*) nl + allocate(mesh%zbar(nl)) ! their standard depths + + ! read full depth levels + zbar => mesh%zbar !required after the allocation, otherwise the pointer remains undefined + read(fileID,*) zbar + if(zbar(2)>0) zbar=-zbar ! zbar is negative + + ! compute mid depth levels + allocate(mesh%Z(nl-1)) + Z => mesh%Z !required after the allocation, otherwise the pointer remains undefined + Z=zbar(1:nl-1)+zbar(2:nl) ! 
mid-depths of cells + Z=0.5_WP*Z + else + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use aux3d.out file to define your depth, but ' + write(*,*) ' the file seems not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" ' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + stop + end if + !___________________________________________________________________________ + ! read depth from depth@node.out or depth@elem.out + elseif (trim(use_depthfile)=='depth@') then + !_______________________________________________________________________ + ! load file depth_zlev.out --> contains number of model levels and full depth + ! levels + file_exist=.False. + file_name=trim(meshpath)//'depth_zlev.out' + inquire(file=trim(file_name),exist=file_exist) + if (file_exist) then + write(*," (A, A)") ' read file:',trim(file_name) + !___________________________________________________________________ + ! load fesom2.0 aux3d.out file + open(fileID, file=file_name) + + ! read the number of levels + read(fileID,*) nl + allocate(mesh%zbar(nl)) ! their standard depths + + ! read full depth levels + zbar => mesh%zbar !required after the allocation, otherwise the pointer remains undefined + read(fileID,*) zbar + if(zbar(2)>0) zbar=-zbar ! zbar is negative + + ! compute mid depth levels + allocate(mesh%Z(nl-1)) + Z => mesh%Z !required after the allocation, otherwise the pointer remains undefined + Z=zbar(1:nl-1)+zbar(2:nl) ! 
mid-depths of cells + Z=0.5_WP*Z + + close(fileID) + else + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use depth@elem.out or depth@node.out file, therefore' + write(*,*) ' you also need the file depth_zlev.out which contains the model ' + write(*,*) ' number of layers and the depth of model levels. This file seems ' + write(*,*) ' not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" and your meshfolder' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + stop + endif -if(depth(2)>0) depth=-depth ! depth is negative - + !_______________________________________________________________________ + ! load file depth@elem.out or depth@node.out contains topography either at + ! nodes or elements + if (use_depthonelem) then + file_name=trim(meshpath)//'depth@elem.out' + else + file_name=trim(meshpath)//'depth@node.out' + end if + inquire(file=trim(file_name),exist=file_exist) + if (file_exist) then + write(*," (A, A)") ' read file:',trim(file_name) + open(fileID, file=file_name) + else + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use depth@elem.out or depth@node.out file to ' + write(*,*) ' define your depth, but the file seems not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" and your meshfolder ' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + stop + end if + end if + + !___________________________________________________________________________ + ! read topography from file + auxi = nod2d + if (use_depthonelem) auxi = elem2d +! write(*,*) ' use_depthonelem = ',use_depthonelem +! 
write(*,*) ' auxi =',auxi + DO n = 1, auxi + read(fileID,*) x + if (x>0) x=-x + if (x>zbar(thers_zbar_lev)) x=zbar(thers_zbar_lev) !TODO KK threshholding for depth + depth(n)=x + END DO + close(fileID) + if(depth(2)>0) depth=-depth ! depth is negative -allocate(mesh%nlevels(elem2D)) -nlevels => mesh%nlevels !required after the allocation, otherwise the pointer remains undefined -allocate(mesh%nlevels_nod2D(nod2D)) -nlevels_nod2D => mesh%nlevels_nod2D !required after the allocation, otherwise the pointer remains undefined + !___________________________________________________________________________ + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute elem, vertice bottom depth index '//achar(27)//'[0m' + + allocate(mesh%nlevels(elem2D)) + nlevels => mesh%nlevels !required after the allocation, otherwise the pointer remains undefined + allocate(mesh%nlevels_nod2D(nod2D)) + nlevels_nod2D => mesh%nlevels_nod2D !required after the allocation, otherwise the pointer remains undefined !___________________________________________________________________________ ! Compute the initial number number of elementa levels, based on the vertice ! depth information do n=1, elem2D - nodes=elem2D_nodes(1:3,n) - !_________________________________________________________________________ - ! depth of element is shallowest depth of sorounding vertices - if (trim(which_depth_n2e) .eq. 'min') then ; dmean=maxval(depth(nodes)) - ! depth of element is deepest depth of sorounding vertices - elseif (trim(which_depth_n2e) .eq. 'max') then ; dmean=minval(depth(nodes)) - ! DEFAULT: depth of element is mean depth of sorounding vertices - elseif (trim(which_depth_n2e) .eq. 'mean') then; dmean=sum(depth(nodes))/3.0 + !_______________________________________________________________________ + if (use_depthonelem) then + dmean = depth(n) ! 
depth is already defined on elements + else + nodes=elem2D_nodes(1:3,n) + !___________________________________________________________________ + ! depth of element is shallowest depth of sorounding vertices + if (trim(which_depth_n2e) .eq. 'min') then ; dmean=maxval(depth(nodes)) + ! depth of element is deepest depth of sorounding vertices + elseif (trim(which_depth_n2e) .eq. 'max') then ; dmean=minval(depth(nodes)) + ! DEFAULT: depth of element is mean depth of sorounding vertices + elseif (trim(which_depth_n2e) .eq. 'mean') then; dmean=sum(depth(nodes))/3.0 + end if end if - !_________________________________________________________________________ + !_______________________________________________________________________ exit_flag=0 do nz=1,nl-1 if(Z(nz)=0) nlevels(n)=thers_lev + if(dmean>=0) nlevels(n)=thers_zbar_lev ! set minimum number of levels to --> thers_lev=5 - if(nlevels(n) do n=1, elem2D - + + !___________________________________________________________________________ + ! write out vertical level indices before iterative geometric adaption to + ! exclude isolated cells + !_______________________________________________________________________ + file_name=trim(meshpath)//'elvls_raw.out' + open(fileID, file=file_name) + do n=1,elem2D + write(fileID,*) nlevels(n) + end do + close(fileID) + !___________________________________________________________________________ ! check for isolated cells (cells with at least two boundary faces or three ! boundary vertices) and eliminate them --> FESOM2.0 doesn't like these kind ! of cells - do nz=4,nl + do nz=thers_zbar_lev+1,nl exit_flag=0 - count1=0 + count_iter=0 !_______________________________________________________________________ ! iteration loop within each layer - do while((exit_flag==0).and.(count1<1000)) + do while((exit_flag==0).and.(count_iter if elem2D_nodes(1,n) == elem2D_nodes(4,n): True --> q=3 --> triangular mesh ! 
--> if elem2D_nodes(1,n) == elem2D_nodes(4,n): False --> q=4 --> quad mesh - q = merge(3,4,elem2D_nodes(1,n) == elem2D_nodes(4,n)) + nneighb = merge(3,4,elem2D_nodes(1,n) == elem2D_nodes(4,n)) ! ! +---isolated bottom cell ! ._______________ | _______________________. @@ -750,16 +883,15 @@ subroutine find_levels(mesh) ! |###|###|###|###|###|###|###|###|###|###|###|###|###|###| ! if (nlevels(n)>=nz) then - count2=0 + count_neighb_open=0 elems=elem_neighbors(1:3,n) - !___________________________________________________________ ! loop over neighbouring triangles - do i=1,q + do i=1,nneighb if (elems(i)>1) then if (nlevels(elems(i))>=nz) then !count neighbours - count2=count2+1 + count_neighb_open=count_neighb_open+1 endif endif enddo @@ -768,15 +900,18 @@ subroutine find_levels(mesh) ! check how many open faces to neighboring triangles the cell ! has, if there are less than 2 its isolated (a cell should ! have at least 2 valid neighbours) - if (count2<2) then + if (count_neighb_open<2) then ! if cell is "isolated", and the one levels shallower bottom ! cell would be shallower than the minimum vertical level ! treshhold (thers_lev). --> in this make sorrounding elements ! one level deeper to reconnect the isolated cell - if (nz-11) nlevels(elems(i)) = max(nlevels(elems(i)),nz) + if (nz-10) then + nlevels(elems(i)) = max(nlevels(elems(i)),nz) + end if end do + !if cell is "isolated" convert to one level shallower bottom cell else nlevels(n)=nz-1 @@ -788,10 +923,12 @@ subroutine find_levels(mesh) end if ! --> if (nlevels(n)>=nz) then end do ! --> do n=1,elem2D end do ! --> do while((exit_flag==0).and.(count1<1000)) + write(*,"(A, I5, A, i5, A, I3)") ' -[iter ]->: nlevel, iter/maxiter=',count_iter,'/',max_iter,', nz=',nz end do ! --> do nz=4,nl !___________________________________________________________________________ ! 
vertical vertice level index of ocean bottom boundary + write(*,"(A)" ) ' -[compu]->: nlevels_nod2D ' nlevels_nod2D=0 do n=1,elem2D q = merge(3,4,elem2D_nodes(1,n) == elem2D_nodes(4,n)) @@ -805,242 +942,360 @@ subroutine find_levels(mesh) !___________________________________________________________________________ ! write vertical level indices into file - if (mype==0) then - !_______________________________________________________________________ - file_name=trim(meshpath)//'elvls.out' - open(fileID, file=file_name) - do n=1,elem2D - write(fileID,*) nlevels(n) - end do - close(fileID) - - !_______________________________________________________________________ - file_name=trim(meshpath)//'nlvls.out' - open(fileID, file=file_name) - do n=1,nod2D - write(fileID,*) nlevels_nod2D(n) - end do - close(fileID) + file_name=trim(meshpath)//'elvls.out' + open(fileID, file=file_name) + do n=1,elem2D + write(fileID,*) nlevels(n) + end do + close(fileID) + + file_name=trim(meshpath)//'nlvls.out' + open(fileID, file=file_name) + do n=1,nod2D + write(fileID,*) nlevels_nod2D(n) + end do + close(fileID) - !_______________________________________________________________________ - write(*,*) '=========================' - write(*,*) 'Mesh is read : ', 'nod2D=', nod2D,' elem2D=', elem2D, ' nl=', nl - write(*,*) 'Min/max depth on mype: ', -zbar(minval(nlevels)),-zbar(maxval(nlevels)) - write(*,*) '3D tracer nodes on mype ', sum(nlevels_nod2d)-(elem2D) - write(*,*) '=========================' - endif + !_______________________________________________________________________ + write(*,*) '=========================' + write(*,*) 'Mesh is read : ', 'nod2D=', nod2D,' elem2D=', elem2D, ' nl=', nl + write(*,*) 'Min/max depth on mype: ', -zbar(minval(nlevels)),-zbar(maxval(nlevels)) + write(*,*) '3D tracer nodes on mype ', sum(nlevels_nod2d)-(elem2D) + write(*,*) '=========================' end subroutine find_levels - +! +! 
!_______________________________________________________________________________ ! finds elemental and nodal levels of cavity-ocean boundary. ! Creates 2 files: cavity_elvls.out, cavity_nlvls.out subroutine find_levels_cavity(mesh) use mod_mesh use g_config - use g_parsup implicit none integer :: nodes(3), elems(3) - integer :: elem, node, nz, j - integer :: exit_flag, count_iter, count_neighb_open, nneighb, cavity_maxlev + integer :: elem, node, nz, j, idx + integer :: count_neighb_open, nneighb, cavity_maxlev, count_isoelem + integer :: exit_flag1, count_iter, max_iter=1000, exit_flag2, count_iter2, max_iter2=10 real(kind=WP) :: dmean character(MAX_PATH) :: file_name - type(t_mesh), intent(inout), target :: mesh + integer, allocatable, dimension(:) :: numelemtonode, idxelemtonode + logical, allocatable, dimension(:) :: elemreducelvl, elemfixlvl + type(t_mesh), intent(inout), target :: mesh #include "associate_mesh_ini.h" - + !___________________________________________________________________________ + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute elem, vertice cavity depth index '//achar(27)//'[0m' + !___________________________________________________________________________ allocate(mesh%ulevels(elem2D)) ulevels => mesh%ulevels allocate(mesh%ulevels_nod2D(nod2D)) ulevels_nod2D => mesh%ulevels_nod2D -!!PS allocate(mesh%cavity_flag_n(nod2D)) -!!PS cavity_flag_n => mesh%cavity_flag_n - + !___________________________________________________________________________ ! Compute level position of ocean-cavity boundary cavity_maxlev=0 do elem=1, elem2D - nodes=elem2D_nodes(1:3,elem) + !_______________________________________________________________________ - ! depth of element is shallowest depth of sorounding vertices - if (trim(which_depth_n2e) .eq. 'min') then ; dmean=maxval(cavity_depth(nodes)) - ! 
depth of element is deepest depth of sorounding vertices - elseif (trim(which_depth_n2e) .eq. 'max') then ; dmean=minval(cavity_depth(nodes)) - ! DEFAULT: depth of element is mean depth of sorounding vertices - elseif (trim(which_depth_n2e) .eq. 'mean') then ; dmean=sum(cavity_depth(nodes))/3.0 + if (use_cavityonelem) then + dmean = cavity_depth(elem) + else + nodes=elem2D_nodes(1:3,elem) + !_______________________________________________________________________ + ! depth of element is shallowest depth of sorounding vertices + if (trim(which_depth_n2e) .eq. 'min') then ; dmean=maxval(cavity_depth(nodes)) + ! depth of element is deepest depth of sorounding vertices + elseif (trim(which_depth_n2e) .eq. 'max') then ; dmean=minval(cavity_depth(nodes)) + ! DEFAULT: depth of element is mean depth of sorounding vertices + elseif (trim(which_depth_n2e) .eq. 'mean') then ; dmean=sum(cavity_depth(nodes))/3.0 + end if end if !_______________________________________________________________________ ! vertical elem level index of cavity-ocean boundary - exit_flag=0 ulevels(elem) = 1 + if (dmean<0.0_WP) ulevels(elem) = 2 + do nz=1,nlevels(elem)-1 - !!PS if(Z(nz) should not be ! possible in FESOM2.0 ! loop over all cavity levels - do nz=1,cavity_maxlev - exit_flag=0 - count_iter=0 + allocate(elemreducelvl(elem2d),elemfixlvl(elem2d)) + allocate(numelemtonode(nl),idxelemtonode(nl)) + !___________________________________________________________________________ + ! outer iteration loop + count_iter2 = 0 + exit_flag2 = 0 + elemfixlvl = .False. + do while((exit_flag2==0) .and. (count_iter2 tri mesh, nneighb = 4 --> quad mesh - nneighb = merge(3,4,elem2D_nodes(1,elem) == elem2D_nodes(4,elem)) - ! - ! .___________________________.~~~~~~~~~~~~~~~~~~~~~~~~~~ - ! |###|###|###|###|###|###|###| - ! |# CAVITY |###| . |###|###| OCEAN - ! |###|###|###| /|\|###| - ! |###|###| | - ! |###| +-- Not good can lead to isolated cells - ! 
- if (nz >= ulevels(elem)) then - count_neighb_open=0 - elems=elem_neighbors(1:3,elem) - - !___________________________________________________________ - ! loop over neighbouring triangles - do j = 1, nneighb - if (elems(j)>0) then ! if its a valid boundary triangle, 0=missing value - ! check for isolated cell - if (ulevels(elems(j))<=nz) then - !count the open faces to neighboring cells - count_neighb_open=count_neighb_open+1 - endif - end if - end do ! --> do i = 1, nneighb - - !___________________________________________________________ - ! check how many open faces to neighboring triangles the cell - ! has, if there are less than 2 its isolated (a cell should - ! have at least 2 valid neighbours) - ! --> in this case shift cavity-ocean interface one level down - if (count_neighb_open<2) then - ! if cell is isolated convert it to a deeper ocean levels - ! except when this levels would remain less than 3 valid - ! bottom levels --> in case make the levels of all sorounding - ! one level shallower - if (nlevels(elem)-(nz+1)<=3) then - do j = 1, nneighb - if (elems(j)>0 .and. ulevels(elems(j))>1 ) ulevels(elems(j)) = min(ulevels(elems(j)),nz) - end do - else - ulevels(elem)=nz+1 + ! iteration loop within each layer + do while((exit_flag1==0).and.(count_iter tri mesh, nneighb = 4 --> quad mesh + nneighb = merge(3,4,elem2D_nodes(1,elem) == elem2D_nodes(4,elem)) + elems = elem_neighbors(1:3,elem) + ! + ! .___________________________.~~~~~~~~~~~~~~~~~~~~~~~~~~ + ! |###|###|###|###|###|###|###| + ! |# CAVITY |###| . |###|###| OCEAN + ! |###|###|###| /|\|###| + ! |###|###| | + ! |###| +-- Not good can lead to isolated cells + ! + if ( nz >= ulevels(elem) .and. nz0) then ! if its a valid boundary triangle, 0=missing value + ! check for isolated cell + if ( ulevels(elems(j))<= nz .and. & + nlevels(elems(j))> nz ) then + !count the open faces to neighboring cells + count_neighb_open=count_neighb_open+1 + endif + end if + end do ! 
--> do i = 1, nneighb + + !_______________________________________________________ + ! check how many open faces to neighboring triangles the cell + ! has, if there are less than 2 its isolated (a cell should + ! have at least 2 valid neighbours) + ! --> in this case shift cavity-ocean interface one level down + if (count_neighb_open<2) then + count_isoelem = count_isoelem+1 + ! if cell is isolated convert it to a deeper ocean levels + ! except when this levels would remain less than 3 valid + ! bottom levels --> in case make the levels of all sorounding + ! triangles shallower + if ( (nlevels(elem)-(nz+1))>=3 .and. & + (elemreducelvl(elem) .eqv. .False.) .and. & + (elemfixlvl( elem) .eqv. .False.) & + ) then + ulevels(elem)=nz+1 + else + ! --> can not increase depth anymore to eleminate isolated + ! cell, otherwise lessthan 3 valid layers + ! --> therefor reduce depth of ONE!!! of the neighbouring + ! triangles. Choose trinagle whos depth is already closest + ! to nz + idx = minloc(ulevels(elems)-nz, 1, MASK=( (elems>0) .and. ((ulevels(elems)-nz)>0) ) ) + ulevels(elems(idx)) = nz-1 + elemreducelvl(elems(idx)) = .True. + end if - end if + !force recheck for all current ocean cells + exit_flag1=0 + end if ! --> if (count_neighb_open<2) then - !force recheck for all current ocean cells - exit_flag=0 - endif ! --> if (count_neighb_open<2) then + end if ! --> if ( nz >= ulevels(elem) .and. nz if (nz >= ulevels(elem)) then - end do ! --> do elem=1,elem2D - end do ! --> do while((exit_flag==0).and.(count_iter<1000)) - end do ! --> do nz=1,cavity_maxlev - - !___________________________________________________________________________ - ! vertical vertice level index of cavity_ocean boundary - ulevels_nod2D = nl - do elem=1,elem2D - nneighb = merge(3,4,elem2D_nodes(1,elem) == elem2D_nodes(4,elem)) + end do ! --> do elem=1,elem2D + + end do ! 
--> do while((exit_flag==0).and.(count_iter<1000)) + write(*,"(A, I5, A, i5, A, I3)") ' -[iter ]->: ulevel, iter/maxiter=',count_iter,'/',max_iter,', nz=',nz + end do ! --> do nz=1,cavity_maxlev + + !_______________________________________________________________________ + ! vertical vertice level index of cavity_ocean boundary + write(*,"(A)" ) ' -[compu]->: ulevels_nod2D ' + ulevels_nod2D = nl + do elem=1,elem2D + nneighb = merge(3,4,elem2D_nodes(1,elem) == elem2D_nodes(4,elem)) + !___________________________________________________________________ + ! loop over neighbouring triangles + do j=1,nneighb + node=elem2D_nodes(j,elem) + ulevels_nod2D(node)=min(ulevels_nod2D(node),ulevels(elem)) + end do + end do ! --> do elem=1,elem2D + !_______________________________________________________________________ - ! loop over neighbouring triangles - do j=1,nneighb - node=elem2D_nodes(j,elem) -!!PS if(ulevels_nod2D(node)<=ulevels(elem)) then -!!PS ulevels_nod2D(node)=ulevels(elem) -!!PS end if - ulevels_nod2D(node)=min(ulevels_nod2D(node),ulevels(elem)) + ! check ulevels if ulevels=nlevels(elem)) then + write(*,*) ' -[check]->: elem cavity depth deeper or equal bottom depth, elem=',elem + exit_flag2 = 0 + + end if - end do - end do - - !___________________________________________________________________________ - ! check ulevels if ulevels=nlevels(elem)) then - if (mype==0) write(*,*) ' ERROR: found element cavity depth deeper or equal bottom depth' - call par_ex(0) - end if - if (nlevels(elem)-ulevels(elem)<3) then - write(*,*) ' ERROR: found less than three valid element ocean layers' - write(*,*) ' ulevels,nlevels = ',ulevels(elem), nlevels(elem) - write(*,*) ' ulevels(neighb) = ',ulevels(elem_neighbors(1:3,elem)) - write(*,*) ' nlevels(neighb) = ',nlevels(elem_neighbors(1:3,elem)) - end if + if (nlevels(elem)-ulevels(elem)<3) then + write(*,*) ' -[check]->: less than three valid elem ocean layers, elem=',elem + exit_flag2 = 0 + + end if + end do ! 
--> do elem=1,elem2D + + !_______________________________________________________________________ + ! check ulevels_nod2d if ulevels_nod2d=nlevels_nod2D(node)) then + write(*,*) ' -[check]->: vertice cavity depth deeper or equal bottom depth, node=', node + exit_flag2 = 0 + end if + + !___________________________________________________________________ + if (nlevels_nod2D(node)-ulevels_nod2D(node)<3) then + write(*,*) ' -[check]->: less than three valid vertice ocean layers, node=', node + exit_flag2 = 0 + end if + end do ! --> do node=1,nod2D + + do elem=1,elem2D + if (ulevels(elem)< maxval(ulevels_nod2D(elem2D_nodes(:,elem))) ) then + write(*,*) ' -[check]->: found elem cavity shallower than its valid maximum cavity vertice depths, elem=', elem2d + exit_flag2 = 0 + end if + end do ! --> do elem=1,elem2D + + !_______________________________________________________________________ + ! compute how many triangle elements contribute to every vertice in every layer + count_iter=0 + do node=1, nod2D + !___________________________________________________________________ + numelemtonode=0 + idxelemtonode=0 + + !___________________________________________________________________ + ! compute how many triangle elements contribute to vertice in every layer + do j=1,nod_in_elem2D_num(node) + elem=nod_in_elem2D(j,node) + do nz=ulevels(elem),nlevels(elem)-1 + numelemtonode(nz) = numelemtonode(nz) + 1 + idxelemtonode(nz) = elem + end do + end do + + !___________________________________________________________________ + ! check if every vertice in every layer should be connected to at least + ! two triangle elements ! + do nz = ulevels_nod2D(node), nlevels_nod2D(node)-1 + + !_______________________________________________________________ + ! nodes has zero neighbouring triangles and is completely isolated + ! need to adapt ulevels by hand --> inflicts another outher + ! 
iteration loop (exit_flag2=0) + if (numelemtonode(nz)==0) then + exit_flag2 = 0 + count_iter = count_iter+1 + write(*,"( A, I1, A, I7, A, I3)") ' -[check]->: node has only ', numelemtonode(nz) ,' triangle: n=', node, ', nz=',nz + !___________________________________________________________ + ! if node has no neighboring triangle somewhere in the middle + ! of the water column at nz (can happen but seldom) than set + ! all ulevels(elem) of sorounding trinagles whos ulevel is + ! depper than nz, equal to nz and fix that value to forbit it + ! to be changed (elemfixlvl > 0) + do j=1,nod_in_elem2D_num(node) + elem=nod_in_elem2D(j,node) + if (ulevels(elem)>nz) then + ulevels(elem) = nz + elemfixlvl(elem) = .True. + end if + end do + end if + + !_______________________________________________________________ + ! nodes has just one neighbouring triangle --> but needs two --> + ! inflicts another outher iteration loop (exit_flag2=0) + if (numelemtonode(nz)==1) then + exit_flag2 = 0 + count_iter = count_iter+1 + write(*,"( A, I1, A, I7, A, I3)") ' -[check]->: node has only ', numelemtonode(nz) ,' triangle: n=', node, ', nz=',nz + end if + + end do ! --> do nz = ulevels_nod2D(node), nlevels_nod2D(node)-1 + + end do ! --> do node=1, nod2D + + !_______________________________________________________________________ + ! check if cavity geometry did converge + if (exit_flag2 == 0) then + print *, achar(27)//'[33m' //'____________________________________________________________'//achar(27)//'[0m' + print *, ' -['//achar(27)//'[33m'//'WARN'//achar(27)//'[0m'//']->: Cavity geom. not converged yet, do further outer iteration' + write(*,"(A, I3, A, I3)") ' iter step ', count_iter2,'/', max_iter2 + write(*,*) + end if + + !_______________________________________________________________________ end do + deallocate(elemreducelvl,elemfixlvl) + deallocate(numelemtonode,idxelemtonode) !___________________________________________________________________________ - ! 
check ulevels_nod2d if ulevels_nod2d=nlevels_nod2D(elem)) then - if (mype==0) write(*,*) ' ERROR: found vertice cavity depth deeper or equal bottom depth' - call par_ex(0) - end if - if (nlevels_nod2D(elem)-ulevels_nod2D(elem)<3) then - if (mype==0) write(*,*) ' ERROR: found less than three valid vertice ocean layers' - end if - end do - - do elem=1,elem2D - if (ulevels(elem)< maxval(ulevels_nod2D(elem2D_nodes(:,elem))) ) then - if (mype==0) then - write(*,*) ' ERROR: found element cavity depth that is shallower than its valid maximum cavity vertice depths' - write(*,*) ' ule | uln = ',ulevels(elem),' | ',ulevels_nod2D(elem2D_nodes(:,elem)) - end if - call par_ex(0) - end if - end do -!!PS !___________________________________________________________________________ -!!PS ! compute nodal cavity flag: 1 yes cavity/ 0 no cavity -!!PS cavity_flag = 0 -!!PS do node=1,nod2D -!!PS if (ulevels_nod2D(node)>1) cavity_flag(node)=1 -!!PS end do + ! check if cavity geometry totaly converged or failed to converge in the later + ! case will break up model + if (exit_flag2 == 0) then + write(*,*) + print *, achar(27)//'[31m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;31m'//' -[ERROR]->: Cavity geometry constrains did not converge !!! *\(>︿<)/*'//achar(27)//'[0m' + write(*,*) + stop + else + write(*,*) + print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' + print *, ' -['//achar(27)//'[7;32m'//' OK '//achar(27)//'[0m'//']->: Cavity geometry constrains did converge !!! *\(^o^)/*' + end if + !___________________________________________________________________________ ! write out cavity mesh files for vertice and elemental position of ! vertical cavity-ocean boundary - if (mype==0) then - ! 
write out elemental cavity-ocean boundary level - file_name=trim(meshpath)//'cavity_elvls.out' - open(20, file=file_name) - do elem=1,elem2D - write(20,*) ulevels(elem) - enddo - close(20) - - ! write out vertice cavity-ocean boundary level + yes/no cavity flag - file_name=trim(meshpath)//'cavity_nlvls.out' - open(20, file=file_name) -!!PS file_name=trim(meshpath)//'cavity_flag.out' -!!PS open(21, file=file_name) - do node=1,nod2D - write(20,*) ulevels_nod2D(node) -!!PS write(21,*) cavity_flag(node) - enddo - close(20) -!!PS close(21) - endif + ! write out elemental cavity-ocean boundary level + file_name=trim(meshpath)//'cavity_elvls.out' + open(20, file=file_name) + do elem=1,elem2D + write(20,*) ulevels(elem) + enddo + close(20) + + ! write out vertice cavity-ocean boundary level + yes/no cavity flag + file_name=trim(meshpath)//'cavity_nlvls.out' + open(20, file=file_name) + do node=1,nod2D + write(20,*) ulevels_nod2D(node) + enddo + close(20) + end subroutine find_levels_cavity @@ -1095,7 +1350,6 @@ end subroutine elem_center SUBROUTINE find_elem_neighbors_ini(mesh) ! 
For each element three its element neighbors are found USE MOD_MESH -USE g_PARSUP implicit none integer :: elem, eledges(3), elem1, j, n, elnodes(3) type(t_mesh), intent(inout), target :: mesh @@ -1128,7 +1382,6 @@ SUBROUTINE find_elem_neighbors_ini(mesh) if (elem1<2) then write(*,*) 'find_elem_neighbors_ini:Insufficient number of neighbors ',elem write(*,*) 'find_elem_neighbors_ini:Elem neighbors ',elem_neighbors(:,elem) - if (mype==0) then write(*,*) '____________________________________________________________________' write(*,*) ' ERROR: The mesh you want to partitioning contains triangles that' write(*,*) ' have just one neighbor, this was OK for FESOM1.4 but not' @@ -1150,8 +1403,7 @@ SUBROUTINE find_elem_neighbors_ini(mesh) write(*,*) ' eliminate these triangles and the corresponding ' write(*,*) ' unconnected vertice and try to re-partitioning again ' write(*,*) '____________________________________________________________________' - end if - STOP + STOP end if END DO @@ -1222,7 +1474,6 @@ subroutine stiff_mat_ini(mesh) num_ne(nod(j)) = num_ne(nod(j)) + 1 if (max(num_ne(nod(i)), num_ne(nod(j))) > MAX_ADJACENT ) then - print *,'Parameter in o_MESH from ocean_modules.F90, too small.' print *,'Recompile with larger value for MAX_ADJACENT.' stop else @@ -1254,10 +1505,10 @@ end subroutine stiff_mat_ini !=================================================================== ! 
Setup of communication arrays -subroutine communication_ini(mesh) +subroutine communication_ini(partit, mesh) use MOD_MESH USE g_CONFIG - USE g_PARSUP + USE MOD_PARTIT use omp_lib implicit none @@ -1265,9 +1516,15 @@ subroutine communication_ini(mesh) character*10 :: npes_string character(MAX_PATH) :: dist_mesh_dir LOGICAL :: L_EXISTS - type(t_mesh), intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" #include "associate_mesh_ini.h" +#include "associate_part_ass.h" !only my + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute communication arrays '//achar(27)//'[0m' + ! Create the distributed mesh subdirectory write(npes_string,"(I10)") npes dist_mesh_dir=trim(meshpath)//'dist_'//trim(ADJUSTL(npes_string))//'/' @@ -1287,50 +1544,59 @@ subroutine communication_ini(mesh) !$OMP DO do n = 0, npes-1 mype = n ! mype is threadprivate and must not be iterator - call communication_nodn(mesh) - call communication_elemn(mesh) - call save_dist_mesh(mesh) ! Write out communication file com_infoxxxxx.out + call communication_nodn(partit, mesh) + call communication_elemn(partit, mesh) + call save_dist_mesh(partit, mesh) ! 
Write out communication file com_infoxxxxx.out end do !$OMP END DO !$OMP END PARALLEL deallocate(mesh%elem_neighbors) deallocate(mesh%elem_edges) - deallocate(part) + deallocate(partit%part) write(*,*) 'Communication arrays have been set up' end subroutine communication_ini !================================================================= -subroutine set_par_support_ini(mesh) - use g_PARSUP +subroutine set_par_support_ini(partit, mesh) use iso_c_binding, only: idx_t=>C_INT32_T use MOD_MESH + use MOD_PARTIT use g_config implicit none - interface - subroutine check_partitioning(mesh) + subroutine check_partitioning(partit, mesh) use MOD_MESH - type(t_mesh), intent(inout) , target :: mesh + use MOD_PARTIT + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine check_partitioning end interface integer :: n, j, k, nini, nend, ierr integer(idx_t) :: np(10) - type(t_mesh), intent(inout), target :: mesh - + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit interface - subroutine partit(n,ptr,adj,wgt,np,part) bind(C) + subroutine do_partit(n,ptr,adj,wgt,np,part) bind(C) use iso_c_binding, only: idx_t=>C_INT32_T integer(idx_t), intent(in) :: n, ptr(*), adj(*), wgt(*), np(*) integer(idx_t), intent(out) :: part(*) - end subroutine partit + end subroutine do_partit end interface + +#include "associate_part_def.h" #include "associate_mesh_ini.h" +#include "associate_part_ass.h" + if (mype==0) then + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: compute partitioning '//achar(27)//'[0m' + end if + ! Construct partitioning vector if (n_levels<1 .OR. n_levels>10) then print *,'Number of hierarchic partition levels is out of range [1-10]! Aborting...' - call MPI_ABORT( MPI_COMM_FESOM, 1 ) + stop end if np(:) = n_part(:) ! 
Number of partitions on each hierarchy level @@ -1342,7 +1608,8 @@ end subroutine partit np(n_levels+1) = 0 end if - allocate(part(nod2D)) + allocate(partit%part(nod2D)) + part=>partit%part part=0 npes = PRODUCT(np(1:n_levels)) @@ -1352,10 +1619,12 @@ end subroutine partit end if write(*,*) 'Calling partit for npes=', np - call partit(ssh_stiff%dim, ssh_stiff%rowptr, ssh_stiff%colind, & + call do_partit(ssh_stiff%dim, ssh_stiff%rowptr, ssh_stiff%colind, & nlevels_nod2D, np, part) - call check_partitioning(mesh) +write(*,*) 'DONE' +write(*,*) size(partit%part) + call check_partitioning(partit, mesh) write(*,*) 'Partitioning is done.' @@ -1367,7 +1636,7 @@ end subroutine partit deallocate(mesh%nlevels_nod2D) end subroutine set_par_support_ini !======================================================================= -subroutine check_partitioning(mesh) +subroutine check_partitioning(partit, mesh) ! In general, METIS 5 has several advantages compared to METIS 4, e.g., ! * neighbouring tasks get neighbouring partitions (important for multicore computers!) @@ -1381,22 +1650,30 @@ subroutine check_partitioning(mesh) ! trying not to spoil the load balance. 
use MOD_MESH - use g_PARSUP + use MOD_PARTIT + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh integer :: i, j, k, n, n_iso, n_iter, is, ie, kmax, np, cnt - integer :: nod_per_partition(2,0:npes-1) + integer :: nod_per_partition(2,0:partit%npes-1) integer :: max_nod_per_part(2), min_nod_per_part(2) integer :: average_nod_per_part(2), node_neighb_part(100) logical :: already_counted, found_part integer :: max_adjacent_nodes integer, allocatable :: ne_part(:), ne_part_num(:), ne_part_load(:,:) - type(t_mesh), intent(inout), target :: mesh +#include "associate_part_def.h" #include "associate_mesh_ini.h" +#include "associate_part_ass.h" !just for partit%part + + if (mype==0) then + print *, achar(27)//'[1m' //'____________________________________________________________'//achar(27)//'[0m' + print *, achar(27)//'[7;1m' //' -->: check partitioning '//achar(27)//'[0m' + end if ! Check load balancing - do i=0,npes-1 + do i=0, npes-1 nod_per_partition(1,i) = count(part(:) == i) - nod_per_partition(2,i) = sum(nlevels_nod2D,part(:) == i) + nod_per_partition(2,i) = sum(nlevels_nod2D, part(:) == i) enddo min_nod_per_part(1) = minval( nod_per_partition(1,:)) diff --git a/src/fvom_main.F90 b/src/fvom_main.F90 deleted file mode 100755 index 4c9f0fb97..000000000 --- a/src/fvom_main.F90 +++ /dev/null @@ -1,320 +0,0 @@ -!=============================================================================! -! -! Finite Volume Sea-ice Ocean Model -! -!=============================================================================! -! The main driving routine -!=============================================================================! - -program main -USE MOD_MESH -USE o_ARRAYS -USE o_PARAM -USE g_PARSUP -USE i_PARAM -use i_ARRAYS -use g_clock -use g_config -use g_comm_auto -use g_forcing_arrays -use io_RESTART -use io_MEANDATA -use io_mesh_info -use diagnostics -use mo_tidal -use fesom_version_info_module - -! 
Define icepack module -#if defined (__icepack) -use icedrv_main, only: set_icepack, init_icepack, alloc_icepack -#endif - -#if defined (__oasis) -use cpl_driver -#endif - -IMPLICIT NONE - -integer :: n, nsteps, offset, row, i, provided -real(kind=WP) :: t0, t1, t2, t3, t4, t5, t6, t7, t8, t0_ice, t1_ice, t0_frc, t1_frc -real(kind=WP) :: rtime_fullice, rtime_write_restart, rtime_write_means, rtime_compute_diag, rtime_read_forcing -real(kind=real32) :: rtime_setup_mesh, rtime_setup_ocean, rtime_setup_forcing -real(kind=real32) :: rtime_setup_ice, rtime_setup_other, rtime_setup_restart -real(kind=real32) :: mean_rtime(15), max_rtime(15), min_rtime(15) -real(kind=real32) :: runtime_alltimesteps - -type(t_mesh), target, save :: mesh - -#ifndef __oifs - !ECHAM6-FESOM2 coupling: cpl_oasis3mct_init is called here in order to avoid circular dependencies between modules (cpl_driver and g_PARSUP) - !OIFS-FESOM2 coupling: does not require MPI_INIT here as this is done by OASIS - call MPI_INIT_THREAD(MPI_THREAD_MULTIPLE, provided, i) -#endif - - -#if defined (__oasis) - call cpl_oasis3mct_init(MPI_COMM_FESOM) -#endif - t1 = MPI_Wtime() - - call par_init - if(mype==0) then - write(*,*) - print *,"FESOM2 git SHA: "//fesom_git_sha() - print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;32m'//' --> FESOM BUILDS UP MODEL CONFIGURATION '//achar(27)//'[0m' - end if - !===================== - ! Read configuration data, - ! load the mesh and fill in - ! auxiliary mesh arrays - !===================== - call setup_model ! Read Namelists, always before clock_init - call clock_init ! read the clock file - call get_run_steps(nsteps) - call mesh_setup(mesh) - - if (mype==0) write(*,*) 'FESOM mesh_setup... complete' - - !===================== - ! Allocate field variables - ! and additional arrays needed for - ! fancy advection etc. 
- !===================== - call check_mesh_consistency(mesh) - if (mype==0) t2=MPI_Wtime() - call ocean_setup(mesh) - if (mype==0) then - write(*,*) 'FESOM ocean_setup... complete' - t3=MPI_Wtime() - endif - call forcing_setup(mesh) - if (mype==0) t4=MPI_Wtime() - if (use_ice) then - call ice_setup(mesh) - ice_steps_since_upd = ice_ave_steps-1 - ice_update=.true. - if (mype==0) write(*,*) 'EVP scheme option=', whichEVP - endif - if (mype==0) t5=MPI_Wtime() - call compute_diagnostics(0, mesh) ! allocate arrays for diagnostic -#if defined (__oasis) - call cpl_oasis3mct_define_unstr(mesh) - if(mype==0) write(*,*) 'FESOM ----> cpl_oasis3mct_define_unstr nsend, nrecv:',nsend, nrecv -#endif - -#if defined (__icepack) - !===================== - ! Setup icepack - !===================== - if (mype==0) write(*,*) 'Icepack: reading namelists from namelist.icepack' - call set_icepack - call alloc_icepack - call init_icepack(mesh) - if (mype==0) write(*,*) 'Icepack: setup complete' -#endif - - call clock_newyear ! check if it is a new year - if (mype==0) t6=MPI_Wtime() - !___CREATE NEW RESTART FILE IF APPLICABLE___________________________________ - ! The interface to the restart module is made via call restart ! - ! The inputs are: istep, l_write, l_create - ! if istep is not zero it will be decided whether restart shall be written - ! if l_write is TRUE the restart will be forced - ! if l_read the restart will be read - ! as an example, for reading restart one does: call restart(0, .false., .false., .true.) - call restart(0, .false., r_restart, mesh) ! istep, l_write, l_read - if (mype==0) t7=MPI_Wtime() - - ! store grid information into netcdf file - if (.not. 
r_restart) call write_mesh_info(mesh) - - !___IF RESTART WITH ZLEVEL OR ZSTAR IS DONE, ALSO THE ACTUAL LEVELS AND ____ - !___MIDDEPTH LEVELS NEEDS TO BE CALCULATET AT RESTART_______________________ - if (r_restart) then - call restart_thickness_ale(mesh) - end if - - if (mype==0) then - t8=MPI_Wtime() - - rtime_setup_mesh = real( t2 - t1 ,real32) - rtime_setup_ocean = real( t3 - t2 ,real32) - rtime_setup_forcing = real( t4 - t3 ,real32) - rtime_setup_ice = real( t5 - t4 ,real32) - rtime_setup_restart = real( t7 - t6 ,real32) - rtime_setup_other = real((t8 - t7) + (t6 - t5) ,real32) - - write(*,*) '==========================================' - write(*,*) 'MODEL SETUP took on mype=0 [seconds] ' - write(*,*) 'runtime setup total ',real(t8-t1,real32) - write(*,*) ' > runtime setup mesh ',rtime_setup_mesh - write(*,*) ' > runtime setup ocean ',rtime_setup_ocean - write(*,*) ' > runtime setup forcing ',rtime_setup_forcing - write(*,*) ' > runtime setup ice ',rtime_setup_ice - write(*,*) ' > runtime setup restart ',rtime_setup_restart - write(*,*) ' > runtime setup other ',rtime_setup_other - write(*,*) '============================================' - endif - - !===================== - ! Time stepping - !===================== - -! Initialize timers - rtime_fullice = 0._WP - rtime_write_restart = 0._WP - rtime_write_means = 0._WP - rtime_compute_diag = 0._WP - rtime_read_forcing = 0._WP - - if (mype==0) write(*,*) 'FESOM start iteration before the barrier...' - call MPI_Barrier(MPI_COMM_FESOM, MPIERR) - - if (mype==0) then - write(*,*) 'FESOM start iteration after the barrier...' 
- t0 = MPI_Wtime() - endif - if(mype==0) then - write(*,*) - print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' - print *, achar(27)//'[7;32m'//' --> FESOM STARTS TIME LOOP '//achar(27)//'[0m' - end if - !___MODEL TIME STEPPING LOOP________________________________________________ - if (use_global_tides) then - call foreph_ini(yearnew, month) - end if - - do n=1, nsteps - if (use_global_tides) then - call foreph(mesh) - end if - mstep = n - if (mod(n,logfile_outfreq)==0 .and. mype==0) then - write(*,*) 'FESOM =======================================================' -! write(*,*) 'FESOM step:',n,' day:', n*dt/24./3600., - write(*,*) 'FESOM step:',n,' day:', daynew,' year:',yearnew - write(*,*) - end if -#if defined (__oifs) || defined (__oasis) - seconds_til_now=INT(dt)*(n-1) -#endif - call clock - - !___compute horizontal velocity on nodes (originaly on elements)________ - call compute_vel_nodes(mesh) - - !___model sea-ice step__________________________________________________ - t1 = MPI_Wtime() - if(use_ice) then - !___compute fluxes from ocean to ice________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call ocean2ice(n)'//achar(27)//'[0m' - call ocean2ice(mesh) - - !___compute update of atmospheric forcing____________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call update_atm_forcing(n)'//achar(27)//'[0m' - t0_frc = MPI_Wtime() - call update_atm_forcing(n, mesh) - t1_frc = MPI_Wtime() - !___compute ice step________________________________________________ - if (ice_steps_since_upd>=ice_ave_steps-1) then - ice_update=.true. - ice_steps_since_upd = 0 - else - ice_update=.false. - ice_steps_since_upd=ice_steps_since_upd+1 - endif - if (flag_debug .and. 
mype==0) print *, achar(27)//'[34m'//' --> call ice_timestep(n)'//achar(27)//'[0m' - if (ice_update) call ice_timestep(n, mesh) - !___compute fluxes to the ocean: heat, freshwater, momentum_________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_fluxes_mom...'//achar(27)//'[0m' - call oce_fluxes_mom(mesh) ! momentum only - call oce_fluxes(mesh) - end if - call before_oce_step(mesh) ! prepare the things if required - t2 = MPI_Wtime() - - !___model ocean step____________________________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call oce_timestep_ale'//achar(27)//'[0m' - call oce_timestep_ale(n, mesh) - t3 = MPI_Wtime() - !___compute energy diagnostics..._______________________________________ - if (flag_debug .and. mype==0) print *, achar(27)//'[34m'//' --> call compute_diagnostics(1)'//achar(27)//'[0m' - call compute_diagnostics(1, mesh) - t4 = MPI_Wtime() - !___prepare output______________________________________________________ - if (flag_debug .and. 
mype==0) print *, achar(27)//'[34m'//' --> call output (n)'//achar(27)//'[0m' - call output (n, mesh) - t5 = MPI_Wtime() - call restart(n, .false., .false., mesh) - t6 = MPI_Wtime() - - rtime_fullice = rtime_fullice + t2 - t1 - rtime_compute_diag = rtime_compute_diag + t4 - t3 - rtime_write_means = rtime_write_means + t5 - t4 - rtime_write_restart = rtime_write_restart + t6 - t5 - rtime_read_forcing = rtime_read_forcing + t1_frc - t0_frc - end do - - call finalize_output() - - !___FINISH MODEL RUN________________________________________________________ - - call MPI_Barrier(MPI_COMM_FESOM, MPIERR) - if (mype==0) then - t1 = MPI_Wtime() - runtime_alltimesteps = real(t1-t0,real32) - write(*,*) 'FESOM Run is finished, updating clock' - endif - - mean_rtime(1) = rtime_oce - mean_rtime(2) = rtime_oce_mixpres - mean_rtime(3) = rtime_oce_dyn - mean_rtime(4) = rtime_oce_dynssh - mean_rtime(5) = rtime_oce_solvessh - mean_rtime(6) = rtime_oce_GMRedi - mean_rtime(7) = rtime_oce_solvetra - mean_rtime(8) = rtime_ice - mean_rtime(9) = rtime_tot - mean_rtime(10) = rtime_fullice - rtime_read_forcing - mean_rtime(11) = rtime_compute_diag - mean_rtime(12) = rtime_write_means - mean_rtime(13) = rtime_write_restart - mean_rtime(14) = rtime_read_forcing - - max_rtime(1:14) = mean_rtime(1:14) - min_rtime(1:14) = mean_rtime(1:14) - - call MPI_AllREDUCE(MPI_IN_PLACE, mean_rtime, 14, MPI_REAL, MPI_SUM, MPI_COMM_FESOM, MPIerr) - mean_rtime(1:14) = mean_rtime(1:14) / real(npes,real32) - call MPI_AllREDUCE(MPI_IN_PLACE, max_rtime, 14, MPI_REAL, MPI_MAX, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(MPI_IN_PLACE, min_rtime, 14, MPI_REAL, MPI_MIN, MPI_COMM_FESOM, MPIerr) - - if (mype==0) then - write(*,*) '___MODEL RUNTIME mean, min, max per task [seconds]________________________' - write(*,*) ' runtime ocean:',mean_rtime(1), min_rtime(1), max_rtime(1) - write(*,*) ' > runtime oce. mix,pres. :',mean_rtime(2), min_rtime(2), max_rtime(2) - write(*,*) ' > runtime oce. dyn. 
u,v,w:',mean_rtime(3), min_rtime(3), max_rtime(3) - write(*,*) ' > runtime oce. dyn. ssh :',mean_rtime(4), min_rtime(4), max_rtime(4) - write(*,*) ' > runtime oce. solve ssh:',mean_rtime(5), min_rtime(5), max_rtime(5) - write(*,*) ' > runtime oce. GM/Redi :',mean_rtime(6), min_rtime(6), max_rtime(6) - write(*,*) ' > runtime oce. tracer :',mean_rtime(7), min_rtime(7), max_rtime(7) - write(*,*) ' runtime ice :',mean_rtime(10), min_rtime(10), max_rtime(10) - write(*,*) ' > runtime ice step :',mean_rtime(8), min_rtime(8), max_rtime(8) - write(*,*) ' runtime diag: ', mean_rtime(11), min_rtime(11), max_rtime(11) - write(*,*) ' runtime output: ', mean_rtime(12), min_rtime(12), max_rtime(12) - write(*,*) ' runtime restart:', mean_rtime(13), min_rtime(13), max_rtime(13) - write(*,*) ' runtime forcing:', mean_rtime(14), min_rtime(14), max_rtime(14) - write(*,*) ' runtime total (ice+oce):',mean_rtime(9), min_rtime(9), max_rtime(9) - write(*,*) - write(*,*) '============================================' - write(*,*) '=========== BENCHMARK RUNTIME ==============' - write(*,*) ' Number of cores : ',npes - write(*,*) ' Runtime for all timesteps : ',runtime_alltimesteps,' sec' - write(*,*) '============================================' - write(*,*) - end if -! call clock_finish - call par_ex -end program main - diff --git a/src/gen_bulk_formulae.F90 b/src/gen_bulk_formulae.F90 index 2704fd974..3f3ee05cb 100755 --- a/src/gen_bulk_formulae.F90 +++ b/src/gen_bulk_formulae.F90 @@ -1,11 +1,11 @@ MODULE gen_bulk ! Compute heat and momentum exchange coefficients use mod_mesh - use i_therm_param - use i_arrays + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_ICE use g_forcing_arrays use g_forcing_param, only: ncar_bulk_z_wind, ncar_bulk_z_tair, ncar_bulk_z_shum - use g_parsup use o_param, only: WP use g_sbf, only: atmdata, i_totfl, i_xwind, i_ywind, i_humi, i_qsr, i_qlw, i_tair, i_prec, i_mslp, i_cloud @@ -18,7 +18,7 @@ MODULE gen_bulk ! ! 
!_______________________________________________________________________________ -subroutine ncar_ocean_fluxes_mode_fesom14(mesh) +subroutine ncar_ocean_fluxes_mode_fesom14(ice, partit, mesh) ! Compute drag coefficient and the transfer coefficients for evaporation ! and sensible heat according to LY2004. ! In this routine we assume air temperature and humidity are at the same @@ -46,9 +46,20 @@ subroutine ncar_ocean_fluxes_mode_fesom14(mesh) real(kind=WP), parameter :: grav = 9.80_WP, vonkarm = 0.40_WP real(kind=WP), parameter :: q1=640380._WP, q2=-5107.4_WP ! for saturated surface specific humidity real(kind=WP), parameter :: zz = 10.0_WP - type(t_mesh), intent(in) , target :: mesh - - do i=1,myDim_nod2d+eDim_nod2d + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice + real(kind=WP), dimension(:) , pointer :: T_oc_array, u_w, v_w + real(kind=WP) , pointer :: inv_rhoair, tmelt + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + T_oc_array => ice%srfoce_temp(:) + inv_rhoair => ice%thermo%inv_rhoair + tmelt => ice%thermo%tmelt +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, m, cd_n10, ce_n10, ch_n10, cd_n10_rt, cd, ce, ch, cd_rt, zeta, x2, x, psi_m, psi_h, stab, & +!$OMP t, ts, q, qs, u, u10, tv, xx, dux, dvy, tstar, qstar, ustar, bstar ) +!$OMP DO + do i=1, partit%myDim_nod2d+partit%eDim_nod2d t=tair(i) + tmelt ! degree celcium to Kelvin ts=t_oc_array(i) + tmelt ! q=shum(i) @@ -102,17 +113,17 @@ subroutine ncar_ocean_fluxes_mode_fesom14(mesh) ch = ch_n10/(1.0_WP+ch_n10*xx/cd_n10_rt)*sqrt(cd/cd_n10) ! 10b (corrected code aug2007) ce = ce_n10/(1.0_WP+ce_n10*xx/cd_n10_rt)*sqrt(cd/cd_n10) ! 10c (corrected code aug2007) end do - cd_atm_oce_arr(i)=cd ch_atm_oce_arr(i)=ch ce_atm_oce_arr(i)=ce end do - +!$OMP END DO +!$OMP END PARALLEL end subroutine ncar_ocean_fluxes_mode_fesom14 ! ! 
!_______________________________________________________________________________ -subroutine ncar_ocean_fluxes_mode(mesh) +subroutine ncar_ocean_fluxes_mode(ice, partit, mesh) ! Compute drag coefficient and the transfer coefficients for evaporation ! and sensible heat according to LY2004. ! with updates from Large et al. 2009 for the computation of the wind drag @@ -151,9 +162,23 @@ subroutine ncar_ocean_fluxes_mode(mesh) real(kind=WP) :: test, cd_prev, inc_ratio=1.0e-4 real(kind=WP) :: t_prev, q_prev - type(t_mesh), intent(in) , target :: mesh + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice + real(kind=WP), dimension(:) , pointer :: T_oc_array, u_w, v_w + real(kind=WP) , pointer :: inv_rhoair, tmelt + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + T_oc_array => ice%srfoce_temp(:) + inv_rhoair => ice%thermo%inv_rhoair + tmelt => ice%thermo%tmelt + - do i=1,myDim_nod2d+eDim_nod2d +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, m, cd_n10, ce_n10, ch_n10, cd_n10_rt, hl1, cd, ce, ch, cd_rt, x2, x, stab, & +!$OMP zeta_u, zeta_t, zeta_q, psi_m_u, psi_h_u, psi_m_t, psi_h_t, psi_m_q, psi_h_q, & +!$OMP ts, qs, tv, xx, dux, dvy, t, t10, q, q10, u, u10 ) +!$OMP DO + do i=1,partit%myDim_nod2d+partit%eDim_nod2d if (mesh%ulevels_nod2d(i)>1) cycle ! degree celcium to Kelvin t = tair(i) + tmelt @@ -261,7 +286,6 @@ subroutine ncar_ocean_fluxes_mode(mesh) !___________________________________________________________________ ! (3a) shift wind speed to 10m and neutral stability u10 = u/(1.0_WP+cd_n10_rt*(log(ncar_bulk_z_wind/10._WP)-psi_m_u)/vonkarm) ! L-Y eqn. 9a !why cd_n10_rt not cd_rt -!!PS u10 = u/(1.0_WP+cd_rt*(log(ncar_bulk_z_wind/10._WP)-psi_m_u)/vonkarm) ! L-Y eqn. 9a !why cd_n10_rt not cd_rt u10 = max(u10, u10min) ! 0.3 [m/s] floor on wind ! (3b) shift temperature and humidity to wind height t10 = t - tstar/vonkarm*(log(ncar_bulk_z_tair/ncar_bulk_z_wind)+psi_h_u-psi_h_t)! L-Y eqn. 
9b @@ -310,43 +334,44 @@ subroutine ncar_ocean_fluxes_mode(mesh) ! final transfer coefficients for wind, sensible heat and evaporation cd_atm_oce_arr(i)=cd ch_atm_oce_arr(i)=ch - ce_atm_oce_arr(i)=ce - + ce_atm_oce_arr(i)=ce end do - +!$OMP END DO +!$OMP END PARALLEL end subroutine ncar_ocean_fluxes_mode ! !--------------------------------------------------------------------------------------------------- ! -subroutine cal_wind_drag_coeff +subroutine cal_wind_drag_coeff(partit) ! Compute wind-ice drag coefficient following AOMIP ! ! Coded by Qiang Wang ! Reviewed by ?? !-------------------------------------------------- - use o_mesh - use i_arrays use g_forcing_arrays - use g_parsup implicit none - integer :: i - real(kind=WP) :: ws + integer :: i + real(kind=WP) :: ws + type(t_partit), intent(in) :: partit - do i=1,myDim_nod2d+eDim_nod2d +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i, ws) + do i=1,partit%myDim_nod2d+partit%eDim_nod2d ws=sqrt(u_wind(i)**2+v_wind(i)**2) cd_atm_ice_arr(i)=(1.1_WP+0.04_WP*ws)*1.0e-3_WP end do - +!$OMP END PARALLEL DO end subroutine cal_wind_drag_coeff ! -SUBROUTINE nemo_ocean_fluxes_mode +SUBROUTINE nemo_ocean_fluxes_mode(ice, partit) !!---------------------------------------------------------------------- !! ** Purpose : Change model variables according to atm fluxes !! source of original code: NEMO 3.1.1 + NCAR !!---------------------------------------------------------------------- IMPLICIT NONE + type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice integer :: i real(wp) :: rtmp ! temporal real real(wp) :: wndm ! delta of wind module and ocean curent module @@ -365,9 +390,13 @@ SUBROUTINE nemo_ocean_fluxes_mode t_zu, & ! air temp. shifted at zu [K] q_zu ! spec. hum. 
shifted at zu [kg/kg] real(wp) :: zevap, zqsb, zqla, zqlw -!!$OMP PARALLEL -!!$OMP DO - do i = 1, myDim_nod2D+eDim_nod2d + real(kind=WP), dimension(:) , pointer :: u_w, v_w, t_oc_array + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + t_oc_array => ice%srfoce_temp(:) + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i, wdx, wdy, wndm, zst, q_sat, Cd, Ch, Ce, t_zu, q_zu) + do i = 1, partit%myDim_nod2D+partit%eDim_nod2d wdx = atmdata(i_xwind,i) - u_w(i) ! wind from data - ocean current ( x direction) wdy = atmdata(i_ywind,i) - v_w(i) ! wind from data - ocean current ( y direction) wndm = SQRT( wdx * wdx + wdy * wdy ) @@ -381,8 +410,7 @@ SUBROUTINE nemo_ocean_fluxes_mode ch_atm_oce_arr(i)=Ch ce_atm_oce_arr(i)=Ce end do -!!$OMP END DO -!!$OMP END PARALLEL +!$OMP END PARALLEL DO END SUBROUTINE nemo_ocean_fluxes_mode !------------------------------------------------------------------------------- diff --git a/src/gen_comm.F90 b/src/gen_comm.F90 index 8d6c4f345..26d318008 100755 --- a/src/gen_comm.F90 +++ b/src/gen_comm.F90 @@ -4,33 +4,38 @@ ! The communication rules are saved. ! set_par_support in the main phase just allocates memory for buffer ! arrays, the rest is read together with mesh from saved files. - -!KK: moved par_ex,set_par_support,set_par_support_ini to module g_PARSUP - -! 
=============================================================== !======================================================================= -subroutine communication_nodn(mesh) +subroutine communication_nodn(partit, mesh) use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n, np, prank, el, r_count, s_count, q, i, j, nod, k, l - integer :: num_send(0:npes-1), num_recv(0:npes-1), nd_count + integer :: num_send(0:partit%npes-1), num_recv(0:partit%npes-1), nd_count integer, allocatable :: recv_from_pe(:), send_to_pes(:,:) logical :: max_laendereck_too_small=.false. integer :: IERR +#include "associate_part_def.h" #include "associate_mesh_ini.h" +#include "associate_part_ass.h" !part only ! Assume we have 2D partitioning vector in part. Find communication rules ! Reduce allocation: find all neighboring PE - nd_count = count(part(1:nod2d) == mype) +! write(*,*) nod2d +! write(*,*) MAX_LAENDERECK +! write(*,*) nd_count +! write(*,*) allocated(partit%myList_nod2D) +! write(*,*) partit%mype allocate(recv_from_pe(nod2d), send_to_pes(MAX_LAENDERECK,nd_count), & - myList_nod2D(nd_count), STAT=IERR) + partit%myList_nod2D(nd_count), STAT=IERR) if (IERR /= 0) then write (*,*) 'Could not allocate arrays in communication_nodn' stop endif + myList_nod2D=>partit%myList_nod2D nd_count = 0 do n=1,nod2D ! 
Checks if element el has nodes that belong to different partitions @@ -159,8 +164,8 @@ subroutine communication_nodn(mesh) r_count = 0 eDim_nod2D=com_nod2D%rptr(com_nod2D%rPEnum+1)-1 - allocate(com_nod2D%rlist(eDim_nod2D), & - com_nod2D%slist(com_nod2D%sptr(com_nod2D%sPEnum+1)-1), STAT=IERR) + allocate(partit%com_nod2D%rlist(eDim_nod2D), & + partit%com_nod2D%slist(com_nod2D%sptr(com_nod2D%sPEnum+1)-1), STAT=IERR) if (IERR /= 0) then write (*,*) 'Could not allocate arrays in communication_nodn' stop @@ -215,19 +220,23 @@ subroutine communication_nodn(mesh) end subroutine communication_nodn !========================================================================== -subroutine communication_elemn(mesh) +subroutine communication_elemn(partit, mesh) use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer, allocatable :: recv_from_pe(:), send_to_pes(:,:) logical :: max_laendereck_too_small=.false. integer :: n, k, ep, np, prank, el, nod integer :: p, q, j, elem, i, l, r_count, s_count, el_count - integer :: num_send(0:npes-1), num_recv(0:npes-1) + integer :: num_send(0:partit%npes-1), num_recv(0:partit%npes-1) integer :: IERR +#include "associate_part_def.h" #include "associate_mesh_ini.h" +#include "associate_part_ass.h" !part only ! Assume we have 2D partitioning vector in part. Find communication ! rules. An elem is external to element n if neither of its nodes ! belongs to PE, but it is among the neighbors. Element n belongs to PE if @@ -241,7 +250,9 @@ subroutine communication_elemn(mesh) !=========================================== ! 
com_elem2D !=========================================== - + com_elem2D =>partit%com_elem2D + com_elem2D_full=>partit%com_elem2D_full + allocate(recv_from_pe(elem2D), STAT=IERR) if (IERR /= 0) then write (*,*) 'Could not allocate arrays in communication_elemn' @@ -258,11 +269,12 @@ subroutine communication_elemn(mesh) end do myDim_elem2D=el_count - allocate(myList_elem2D(el_count), send_to_pes(MAX_LAENDERECK,el_count), STAT=IERR) + allocate(partit%myList_elem2D(el_count), send_to_pes(MAX_LAENDERECK,el_count), STAT=IERR) if (IERR /= 0) then write (*,*) 'Could not allocate arrays in communication_elemn' stop endif + myList_elem2D=>partit%myList_elem2D myList_elem2D(1:el_count) = recv_from_pe(1:el_count) num_send(0:npes-1) = 0 @@ -362,7 +374,7 @@ subroutine communication_elemn(mesh) r_count = 0 eDim_elem2D=com_elem2D%rptr(com_elem2D%rPEnum+1)-1 - allocate(com_elem2D%rlist(eDim_elem2D)) + allocate(partit%com_elem2D%rlist(eDim_elem2D)) do np = 1,com_elem2D%rPEnum prank = com_elem2D%rPE(np) do el = 1, elem2D @@ -374,7 +386,7 @@ subroutine communication_elemn(mesh) end do s_count = 0 - allocate(com_elem2D%slist(com_elem2D%sptr(com_elem2D%sPEnum+1)-1)) + allocate(partit%com_elem2D%slist(com_elem2D%sptr(com_elem2D%sPEnum+1)-1)) do np = 1,com_elem2D%sPEnum prank = com_elem2D%sPE(np) do l = 1, el_count @@ -487,7 +499,7 @@ subroutine communication_elemn(mesh) ! 
Lists themselves r_count = 0 - allocate(com_elem2D_full%rlist(com_elem2D_full%rptr(com_elem2D_full%rPEnum+1)-1)) + allocate(partit%com_elem2D_full%rlist(com_elem2D_full%rptr(com_elem2D_full%rPEnum+1)-1)) do np = 1,com_elem2D_full%rPEnum prank = com_elem2D_full%rPE(np) do el = 1, elem2D @@ -514,15 +526,20 @@ subroutine communication_elemn(mesh) deallocate(recv_from_pe, send_to_pes) end subroutine communication_elemn !========================================================================== -subroutine mymesh(mesh) +subroutine mymesh(partit, mesh) use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n, counter, q, k, elem, q2, eledges(4) integer, allocatable :: aux(:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !======= NODES ! Owned nodes + external nodes which I need: @@ -640,18 +657,3 @@ subroutine mymesh(mesh) ! shared edges which mype updates end subroutine mymesh !================================================================= -#ifndef FVOM_INIT -subroutine status_check -use g_config -use g_parsup -implicit none -integer :: res -res=0 -call MPI_Allreduce (pe_status, res, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_FESOM, MPIerr) -if (res /= 0 ) then - if (mype==0) write(*,*) 'Something Broke. Flushing and stopping...' -!!! a restart file must be written here !!! - call par_ex(1) -endif -end subroutine status_check -#endif diff --git a/src/gen_events.F90 b/src/gen_events.F90 index e364e3f0d..8a7f0e318 100644 --- a/src/gen_events.F90 +++ b/src/gen_events.F90 @@ -90,16 +90,17 @@ end subroutine step_event ! !-------------------------------------------------------------------------------------------- ! 
-subroutine handle_err(errcode) - use g_parsup +subroutine handle_err(errcode, partit) + USE MOD_PARTIT + USE MOD_PARSUP implicit none #include "netcdf.inc" - - integer errcode + type(t_partit), intent(inout) :: partit + integer :: errcode write(*,*) 'Error: ', nf_strerror(errcode) - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) stop end subroutine handle_err ! diff --git a/src/gen_forcing_couple.F90 b/src/gen_forcing_couple.F90 index 2a39b34b7..14da090ff 100755 --- a/src/gen_forcing_couple.F90 +++ b/src/gen_forcing_couple.F90 @@ -1,58 +1,95 @@ module force_flux_consv_interface interface - subroutine force_flux_consv(field2d, mask, n, h, do_stats, mesh) + subroutine force_flux_consv(field2d, mask, n, h, do_stats, partit, mesh) use mod_mesh - use g_parsup !myDim_nod2D, eDim_nod2D, MPI stuff - real(kind=WP), intent (inout) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), intent (in) :: mask(myDim_nod2D+eDim_nod2D) + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent (inout) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent (in) :: mask(partit%myDim_nod2D+partit%eDim_nod2D) integer, intent (in) :: n, h logical, intent (in) :: do_stats - type(t_mesh), intent(in) , target :: mesh end subroutine end interface end module module compute_residual_interface interface - subroutine compute_residual(field2d, mask, n, mesh) + subroutine compute_residual(field2d, mask, n, partit, mesh) use mod_mesh - use g_parsup !myDim_nod2D, eDim_nod2D, MPI stuff - real(kind=WP), intent (in) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), intent (in) :: mask(myDim_nod2D+eDim_nod2D) + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent (in) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent (in) :: 
mask(partit%myDim_nod2D+partit%eDim_nod2D) integer, intent (in) :: n - type(t_mesh), intent(in) , target :: mesh end subroutine end interface end module module integrate_2D_interface interface - subroutine integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, mesh) + subroutine integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, mesh) use mod_mesh - use g_parsup !myDim_nod2D, eDim_nod2D, MPI stuff + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit real(kind=WP), intent (out) :: flux_global(2), flux_local(2) real(kind=WP), intent (out) :: eff_vol(2) - real(kind=WP), intent (in) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), intent (in) :: mask(myDim_nod2D +eDim_nod2D) - type(t_mesh), intent(in) , target :: mesh + real(kind=WP), intent (in) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent (in) :: mask(partit%myDim_nod2D +partit%eDim_nod2D) end subroutine end interface end module +module update_atm_forcing_interface + interface + subroutine update_atm_forcing(istep, ice, tracers, dynamics, partit, mesh) + USE MOD_TRACER + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE MOD_DYN + integer, intent(in) :: istep + type(t_ice), intent(inout), target :: ice + type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(inout), target :: dynamics + end subroutine + end interface +end module + +module net_rec_from_atm_interface + interface + subroutine net_rec_from_atm(action, partit) + USE MOD_PARTIT + USE MOD_PARSUP + logical, intent(in) :: action + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module ! 
Routines for updating ocean surface forcing fields !------------------------------------------------------------------------- -subroutine update_atm_forcing(istep, mesh) +subroutine update_atm_forcing(istep, ice, tracers, dynamics, partit, mesh) use o_PARAM - use mod_MESH + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_ICE + use MOD_DYN use o_arrays - use i_arrays - use i_param - use i_therm_param use g_forcing_param use g_forcing_arrays - use g_parsup use g_clock use g_config use g_comm_auto use g_rotate_grid + use net_rec_from_atm_interface use g_sbf, only: sbc_do use g_sbf, only: atmdata, i_totfl, i_xwind, i_ywind, i_humi, i_qsr, i_qlw, i_tair, i_prec, i_mslp, i_cloud, i_snow, & l_xwind, l_ywind, l_humi, l_qsr, l_qlw, l_tair, l_prec, l_mslp, l_cloud, l_snow @@ -63,8 +100,14 @@ subroutine update_atm_forcing(istep, mesh) use force_flux_consv_interface implicit none - type(t_mesh), intent(in) , target :: mesh - integer :: i, istep,itime,n2,n,nz,k,elem + integer, intent(in) :: istep + type(t_ice) , intent(inout), target :: ice + type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_dyn) , intent(in), target :: dynamics + !_____________________________________________________________________________ + integer :: i, itime,n2,n,nz,k,elem real(kind=WP) :: i_coef, aux real(kind=WP) :: dux, dvy,tx,ty,tvol real(kind=WP) :: t1, t2 @@ -84,9 +127,52 @@ subroutine update_atm_forcing(istep, mesh) !integer, parameter :: nci=192, ncj=94 ! T62 grid !real(kind=WP), dimension(nci,ncj) :: array_nc, array_nc2,array_nc3,x !character(500) :: file -#include "associate_mesh.h" + !_____________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice, u_w, v_w + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y +#if defined (__oasis) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: oce_heat_flux, ice_heat_flux + real(kind=WP), dimension(:), pointer :: tmp_oce_heat_flux, tmp_ice_heat_flux +#endif +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, ice_alb, enthalpyoffuse + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), pointer :: tmelt + real(kind=WP), dimension(:,:,:), pointer :: UVnode +#endif + real(kind=WP) , pointer :: rhoair +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + stress_atmice_x => ice%stress_atmice_x(:) + stress_atmice_y => ice%stress_atmice_y(:) +#if defined (__oifs) || defined (__ifsinterface) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + ice_temp => ice%data(4)%values(:) + ice_alb => ice%atmcoupl%ice_alb(:) + enthalpyoffuse => ice%atmcoupl%enthalpyoffuse(:) + tmelt => ice%thermo%tmelt + UVnode => dynamics%uvnode(:,:,:) +#endif +#if defined (__oasis) || defined (__ifsinterface) + oce_heat_flux => ice%atmcoupl%oce_flx_h(:) + ice_heat_flux => ice%atmcoupl%ice_flx_h(:) + tmp_oce_heat_flux=> ice%atmcoupl%tmpoce_flx_h(:) + tmp_ice_heat_flux=> ice%atmcoupl%tmpice_flx_h(:) +#endif + rhoair => ice%thermo%rhoair + + !_____________________________________________________________________________ t1=MPI_Wtime() -#ifdef __oasis +#if defined (__oasis) if (firstcall) then allocate(exchange(myDim_nod2D+eDim_nod2D), mask(myDim_nod2D+eDim_nod2D)) allocate(a2o_fcorr_stat(nrecv,6)) @@ -98,25 +184,33 @@ subroutine update_atm_forcing(istep, mesh) do i=1,nsend exchange =0. 
if (i.eq.1) then -#if defined (__oifs) +#if defined (__oifs) ! AWI-CM3 outgoing state vectors - do n=1,myDim_nod2D+eDim_nod2D - exchange(n)=tr_arr(1, n, 1)+tmelt ! sea surface temperature [K] - end do + do n=1,myDim_nod2D+eDim_nod2D + exchange(n)=tracers%data(1)%values(1, n)+tmelt ! sea surface temperature [K] + end do elseif (i.eq.2) then - exchange(:) = a_ice(:) ! ice concentation [%] + exchange(:) = a_ice(:) ! ice concentation [%] elseif (i.eq.3) then - exchange(:) = m_snow(:) ! snow thickness + exchange(:) = m_snow(:) ! snow thickness elseif (i.eq.4) then - exchange(:) = ice_temp(:) ! ice surface temperature + exchange(:) = ice_temp(:) ! ice surface temperature elseif (i.eq.5) then - exchange(:) = ice_alb(:) ! ice albedo - else + exchange(:) = ice_alb(:) ! ice albedo + elseif (i.eq.6) then + do n=1,myDim_nod2D+eDim_nod2D + exchange(n) = UVnode(1,1,n) + end do + elseif (i.eq.7) then + do n=1,myDim_nod2D+eDim_nod2D + exchange(n) = UVnode(2,1,n) + end do + else print *, 'not installed yet or error in cpl_oasis3mct_send', mype #else ! AWI-CM2 outgoing state vectors do n=1,myDim_nod2D+eDim_nod2D - exchange(n)=tr_arr(1, n, 1) ! sea surface temperature [°C] + exchange(n)=tracers%data(1)%values(1, n) ! sea surface temperature [°C] end do elseif (i.eq.2) then exchange(:) = m_ice(:) ! ice thickness [m] @@ -128,7 +222,7 @@ subroutine update_atm_forcing(istep, mesh) print *, 'not installed yet or error in cpl_oasis3mct_send', mype #endif endif - call cpl_oasis3mct_send(i, exchange, action) + call cpl_oasis3mct_send(i, exchange, action, partit) enddo #ifdef VERBOSE do i=1, nsend @@ -138,101 +232,103 @@ subroutine update_atm_forcing(istep, mesh) mask=1. do i=1,nrecv exchange =0.0 - call cpl_oasis3mct_recv (i,exchange,action) + call cpl_oasis3mct_recv (i, exchange, action, partit) !if (.not. action) cycle !Do not apply a correction at first time step! - if (i==1 .and. action .and. istep/=1) call net_rec_from_atm(action) - if (i.eq.1) then - if (.not. 
action) cycle - stress_atmoce_x(:) = exchange(:) ! taux_oce - do_rotate_oce_wind=.true. - elseif (i.eq.2) then - if (.not. action) cycle - stress_atmoce_y(:) = exchange(:) ! tauy_oce - do_rotate_oce_wind=.true. - elseif (i.eq.3) then - if (.not. action) cycle - stress_atmice_x(:) = exchange(:) ! taux_ice - do_rotate_ice_wind=.true. - elseif (i.eq.4) then - if (.not. action) cycle - stress_atmice_y(:) = exchange(:) ! tauy_ice - do_rotate_ice_wind=.true. - elseif (i.eq.5) then - if (action) then - prec_rain(:) = exchange(:) ! tot_prec - mask=1. - call force_flux_consv(prec_rain, mask, i, 0,action, mesh) - end if - elseif (i.eq.6) then - if (action) then - prec_snow(:) = exchange(:) ! snowfall - mask=1. - call force_flux_consv(prec_snow, mask,i,1,action, mesh) ! Northern hemisphere - call force_flux_consv(prec_snow, mask,i,2,action, mesh) ! Southern Hemisphere - end if + if (i==1 .and. action .and. istep/=1) call net_rec_from_atm(action, partit) + if (i.eq.1) then + if (.not. action) cycle + stress_atmoce_x(:) = exchange(:) ! taux_oce + do_rotate_oce_wind=.true. + elseif (i.eq.2) then + if (.not. action) cycle + stress_atmoce_y(:) = exchange(:) ! tauy_oce + do_rotate_oce_wind=.true. + elseif (i.eq.3) then + if (.not. action) cycle + stress_atmice_x(:) = exchange(:) ! taux_ice + do_rotate_ice_wind=.true. + elseif (i.eq.4) then + if (.not. action) cycle + stress_atmice_y(:) = exchange(:) ! tauy_ice + do_rotate_ice_wind=.true. + elseif (i.eq.5) then + if (action) then + prec_rain(:) = exchange(:) ! tot_prec + mask=1. + call force_flux_consv(prec_rain, mask, i, 0,action, partit, mesh) + end if + elseif (i.eq.6) then + if (action) then + prec_snow(:) = exchange(:) ! snowfall + mask=1. + call force_flux_consv(prec_snow, mask,i,1,action, partit, mesh) ! Northern hemisphere + call force_flux_consv(prec_snow, mask,i,2,action, partit, mesh) ! Southern Hemisphere + end if elseif (i.eq.7) then - if (action) then - evap_no_ifrac(:) = exchange(:) ! 
tot_evap - tmp_evap_no_ifrac(:) = exchange(:) ! to reset for flux - ! correction - end if - mask=1.-a_ice - evap_no_ifrac(:) = tmp_evap_no_ifrac(:) - call force_flux_consv(evap_no_ifrac,mask,i,0,action, mesh) - elseif (i.eq.8) then - if (action) then - sublimation(:) = exchange(:) ! tot_subl - tmp_sublimation(:) = exchange(:) ! to reset for flux - ! correction - end if - mask=a_ice - sublimation(:) = tmp_sublimation(:) - call force_flux_consv(sublimation,mask,i,1,action, mesh) ! Northern hemisphere - call force_flux_consv(sublimation,mask,i,2,action, mesh) ! Southern Hemisphere - elseif (i.eq.9) then - if (action) then - oce_heat_flux(:) = exchange(:) ! heat_oce - tmp_oce_heat_flux(:) = exchange(:) ! to reset for flux - ! correction - end if - mask=1.-a_ice - oce_heat_flux(:) = tmp_oce_heat_flux(:) - call force_flux_consv(oce_heat_flux, mask, i, 0,action, mesh) - elseif (i.eq.10) then - if (action) then - ice_heat_flux(:) = exchange(:) ! heat_ice - tmp_ice_heat_flux(:) = exchange(:) ! to reset for flux - ! correction - end if - mask=a_ice - ice_heat_flux(:) = tmp_ice_heat_flux(:) - call force_flux_consv(ice_heat_flux, mask, i, 1,action, mesh) ! Northern hemisphere - call force_flux_consv(ice_heat_flux, mask, i, 2,action, mesh) ! Southern Hemisphere - elseif (i.eq.11) then - if (action) then - shortwave(:) = exchange(:) ! heat_swr - tmp_shortwave(:) = exchange(:) ! to reset for flux - ! correction - end if - mask=1.-a_ice - shortwave(:) = tmp_shortwave(:) - call force_flux_consv(shortwave, mask, i, 0,action, mesh) - elseif (i.eq.12) then - if (action) then - runoff(:) = exchange(:) ! AWI-CM2: runoff, AWI-CM3: runoff + excess snow on glaciers - mask=1. - call force_flux_consv(runoff, mask, i, 0,action, mesh) - end if + if (action) then + evap_no_ifrac(:) = exchange(:) ! tot_evap + tmp_evap_no_ifrac(:) = exchange(:) ! to reset for flux + ! 
correction + end if + mask=1.-a_ice + evap_no_ifrac(:) = tmp_evap_no_ifrac(:) + call force_flux_consv(evap_no_ifrac,mask,i,0,action, partit, mesh) + elseif (i.eq.8) then + if (action) then + sublimation(:) = exchange(:) ! tot_subl + tmp_sublimation(:) = exchange(:) ! to reset for flux + ! correction + end if + mask=a_ice + sublimation(:) = tmp_sublimation(:) + call force_flux_consv(sublimation,mask,i,1,action, partit, mesh) ! Northern hemisphere + call force_flux_consv(sublimation,mask,i,2,action, partit, mesh) ! Southern Hemisphere + elseif (i.eq.9) then + if (action) then + oce_heat_flux(:) = exchange(:) ! heat_oce + tmp_oce_heat_flux(:) = exchange(:) ! to reset for flux + ! correction + end if + mask=1.-a_ice + oce_heat_flux(:) = tmp_oce_heat_flux(:) + call force_flux_consv(oce_heat_flux, mask, i, 0,action, partit, mesh) + elseif (i.eq.10) then + if (action) then + ice_heat_flux(:) = exchange(:) ! heat_ice + tmp_ice_heat_flux(:) = exchange(:) ! to reset for flux + ! correction + end if + mask=a_ice + ice_heat_flux(:) = tmp_ice_heat_flux(:) + call force_flux_consv(ice_heat_flux, mask, i, 1,action, partit, mesh) ! Northern hemisphere + call force_flux_consv(ice_heat_flux, mask, i, 2,action, partit, mesh) ! Southern Hemisphere + elseif (i.eq.11) then + if (action) then + shortwave(:) = exchange(:) ! heat_swr + tmp_shortwave(:) = exchange(:) ! to reset for flux + ! correction + end if + mask=1.-a_ice + shortwave(:) = tmp_shortwave(:) + call force_flux_consv(shortwave, mask, i, 0,action, partit, mesh) + elseif (i.eq.12) then + if (action) then + runoff(:) = exchange(:) ! AWI-CM2: runoff, AWI-CM3: runoff + excess snow on glaciers + mask=1. + call force_flux_consv(runoff, mask, i, 0,action, partit, mesh) + end if #if defined (__oifs) + elseif (i.eq.13) then if (action) then - enthalpyoffuse(:) = exchange(:) ! enthalpy of fusion via solid water discharge from glaciers - mask=1. 
- call force_flux_consv(enthalpyoffuse, mask, i, 0,action, mesh) + enthalpyoffuse(:) = exchange(:) ! enthalpy of fusion via solid water discharge from glaciers + mask=1. + call force_flux_consv(enthalpyoffuse, mask, i, 0, action, partit, mesh) end if - end if -#endif +#endif + end if + #ifdef VERBOSE if (mype==0) then write(*,*) 'FESOM RECV: flux ', i, ', max val: ', maxval(exchange) @@ -240,57 +336,68 @@ subroutine update_atm_forcing(istep, mesh) #endif end do - if ((do_rotate_oce_wind .AND. do_rotate_ice_wind) .AND. rotated_grid) then - do n=1, myDim_nod2D+eDim_nod2D - call vector_g2r(stress_atmoce_x(n), stress_atmoce_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) - call vector_g2r(stress_atmice_x(n), stress_atmice_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) - end do - do_rotate_oce_wind=.false. - do_rotate_ice_wind=.false. - end if + if ((do_rotate_oce_wind .AND. do_rotate_ice_wind) .AND. rotated_grid) then + do n=1, myDim_nod2D+eDim_nod2D + call vector_g2r(stress_atmoce_x(n), stress_atmoce_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) + call vector_g2r(stress_atmice_x(n), stress_atmice_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) + end do + do_rotate_oce_wind=.false. + do_rotate_ice_wind=.false. + end if #else - call sbc_do(mesh) - u_wind = atmdata(i_xwind,:) - v_wind = atmdata(i_ywind,:) - shum = atmdata(i_humi ,:) - longwave = atmdata(i_qlw ,:) - shortwave = atmdata(i_qsr ,:) - Tair = atmdata(i_tair ,:)-273.15_WP - prec_rain = atmdata(i_prec ,:)/1000._WP - prec_snow = atmdata(i_snow ,:)/1000._WP - press_air = atmdata(i_mslp ,:) ! 
unit should be Pa - - +#ifndef __ifsinterface + call sbc_do(partit, mesh) +!$OMP PARALLEL DO + DO n=1, myDim_nod2D+eDim_nod2D + u_wind(n) = atmdata(i_xwind,n) + v_wind(n) = atmdata(i_ywind,n) + shum(n) = atmdata(i_humi ,n) + longwave(n) = atmdata(i_qlw ,n) + shortwave(n) = atmdata(i_qsr ,n) + Tair(n) = atmdata(i_tair ,n)-273.15_WP + prec_rain(n) = atmdata(i_prec ,n)/1000._WP + prec_snow(n) = atmdata(i_snow ,n)/1000._WP + if (l_mslp) then + press_air(n) = atmdata(i_mslp ,n) ! unit should be Pa + end if + END DO +!$OMP END PARALLEL DO + if (use_cavity) then +!$OMP PARALLEL DO do i=1,myDim_nod2d+eDim_nod2d if (ulevels_nod2d(i)>1) then - u_wind(i)=0.0_WP - v_wind(i)=0.0_WP - shum(i)=0.0_WP - longwave(i)=0.0_WP - Tair(i)=0.0_WP - prec_rain(i)=0.0_WP - prec_snow(i)=0.0_WP - press_air(i)=0.0_WP - end if + u_wind(i) = 0.0_WP + v_wind(i) = 0.0_WP + shum(i) = 0.0_WP + longwave(i) = 0.0_WP + Tair(i) = 0.0_WP + prec_rain(i)= 0.0_WP + prec_snow(i)= 0.0_WP + if (l_mslp) then + press_air(i)= 0.0_WP + end if + runoff(i) = 0.0_WP + end if end do +!$OMP END PARALLEL DO endif - ! second, compute exchange coefficients ! 1) drag coefficient if(AOMIP_drag_coeff) then - call cal_wind_drag_coeff + call cal_wind_drag_coeff(partit) end if ! 2) drag coeff. and heat exchange coeff. over ocean in case using ncar formulae if(ncar_bulk_formulae) then - cd_atm_oce_arr=0.0_WP - ch_atm_oce_arr=0.0_WP - ce_atm_oce_arr=0.0_WP - call ncar_ocean_fluxes_mode(mesh) +! cd_atm_oce_arr=0.0_WP +! ch_atm_oce_arr=0.0_WP +! ce_atm_oce_arr=0.0_WP + call ncar_ocean_fluxes_mode(ice, partit, mesh) elseif(AOMIP_drag_coeff) then cd_atm_oce_arr=cd_atm_ice_arr end if ! 
third, compute wind stress +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i, dux, dvy, aux) do i=1,myDim_nod2d+eDim_nod2d !__________________________________________________________________________ if (ulevels_nod2d(i)>1) then @@ -315,8 +422,9 @@ subroutine update_atm_forcing(istep, mesh) stress_atmice_x(i) = Cd_atm_ice_arr(i)*aux*dux stress_atmice_y(i) = Cd_atm_ice_arr(i)*aux*dvy end do - +!$OMP END PARALLEL DO ! heat and fresh water fluxes are treated in i_therm and ice2ocean +#endif /* skip all in case of __ifsinterface */ #endif /* (__oasis) */ t2=MPI_Wtime() @@ -350,33 +458,36 @@ end subroutine update_atm_forcing ! 10-12 (T.Rackow, AWI Germany) code reordering and cleanup !----------------------------------------------------------------- ! -SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, mesh) +SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, partit, mesh) use g_forcing_arrays, only : atm_net_fluxes_north, atm_net_fluxes_south, & oce_net_fluxes_north, oce_net_fluxes_south, & flux_correction_north, flux_correction_south, & flux_correction_total - use g_parsup use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP use cpl_driver, only : nrecv, cpl_recv, a2o_fcorr_stat use o_PARAM, only : mstep, WP use compute_residual_interface use integrate_2D_interface IMPLICIT NONE - - real(kind=WP), INTENT (INOUT) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), INTENT (IN) :: mask(myDim_nod2D+eDim_nod2D) - INTEGER, INTENT (IN) :: n - INTEGER, INTENT (IN) :: h !hemisphere: 0=GL, 1=NH, 2=SH - logical, INTENT (IN) :: do_stats - - real(kind=WP) :: rmask(myDim_nod2D+eDim_nod2D) - real(kind=WP) :: weight(myDim_nod2D+eDim_nod2D) - real(kind=WP) :: flux_global(2), flux_local(2) - real(kind=WP) :: eff_vol(2) - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), INTENT (INOUT) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), INTENT (IN) :: 
mask(partit%myDim_nod2D+partit%eDim_nod2D) + INTEGER, INTENT (IN) :: n + INTEGER, INTENT (IN) :: h !hemisphere: 0=GL, 1=NH, 2=SH + logical, INTENT (IN) :: do_stats + real(kind=WP) :: rmask(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: weight(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: flux_global(2), flux_local(2) + real(kind=WP) :: eff_vol(2) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" #if defined (__oifs) return !OIFS-FESOM2 coupling uses OASIS3MCT conservative remapping instead @@ -402,7 +513,7 @@ SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, mesh) END SELECT !residual (net) fluxes; computes also oce_net_fluxes_* - call compute_residual(field2d, rmask, n, mesh) + call compute_residual(field2d, rmask, n, partit, mesh) #ifdef VERBOSE if (mype == 0) then @@ -428,7 +539,7 @@ SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, mesh) end if !integrate (masked) abs(field2d) to get positive weights - call integrate_2D(flux_global, flux_local, eff_vol, abs(field2d), rmask, mesh) + call integrate_2D(flux_global, flux_local, eff_vol, abs(field2d), rmask, partit, mesh) !get weight pattern with integral 1 if (abs(sum(flux_global))>1.e-10) then @@ -452,7 +563,7 @@ SUBROUTINE force_flux_consv(field2d, mask, n, h, do_stats, mesh) END SELECT !check conservation - call integrate_2D(flux_global, flux_local, eff_vol, field2d, rmask, mesh) + call integrate_2D(flux_global, flux_local, eff_vol, field2d, rmask, partit, mesh) #ifdef VERBOSE if (mype == 0) then write(*,'(3A,3e15.7)') 'oce NH SH GL / ', trim(cpl_recv(n)), ': ', & @@ -468,30 +579,34 @@ END SUBROUTINE force_flux_consv ! Compute the difference between the net fluxes seen by the atmosphere ! and ocean component (residual flux) for flux n. ! 
-SUBROUTINE compute_residual(field2d, mask, n, mesh) +SUBROUTINE compute_residual(field2d, mask, n, partit, mesh) use g_forcing_arrays, only : atm_net_fluxes_north, atm_net_fluxes_south, & oce_net_fluxes_north, oce_net_fluxes_south, & flux_correction_north, flux_correction_south, & flux_correction_total - use g_parsup use o_PARAM, only : WP use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use integrate_2D_interface IMPLICIT NONE - - real(kind=WP), INTENT(IN) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), INTENT(IN) :: mask(myDim_nod2D+eDim_nod2D) - INTEGER, INTENT(IN) :: n + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), INTENT(IN) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), INTENT(IN) :: mask(partit%myDim_nod2D+partit%eDim_nod2D) + INTEGER, INTENT(IN) :: n real(kind=WP) :: flux_global(2), flux_local(2) real(kind=WP) :: eff_vol(2) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !compute net flux (for flux n) on ocean side - call integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, mesh) + call integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, mesh) oce_net_fluxes_north(n)=flux_global(1) oce_net_fluxes_south(n)=flux_global(2) @@ -506,24 +621,25 @@ END SUBROUTINE compute_residual ! -flux_local (returned) is the net local flux (for current pc) ! -flux_global (returned) is the communicated and summarized flux_local ! 
-SUBROUTINE integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, mesh) - - - use g_parsup !myDim_nod2D, eDim_nod2D, MPI stuff +SUBROUTINE integrate_2D(flux_global, flux_local, eff_vol, field2d, mask, partit, mesh) use MOD_MESH - use o_PARAM, only: WP - + USE MOD_PARTIT + USE MOD_PARSUP + use o_PARAM, only: WP IMPLICIT NONE - + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(in), target :: partit real(kind=WP), INTENT(OUT) :: flux_global(2), flux_local(2) real(kind=WP), INTENT(OUT) :: eff_vol(2) - real(kind=WP), INTENT(IN) :: field2d(myDim_nod2D+eDim_nod2D) - real(kind=WP), INTENT(IN) :: mask(myDim_nod2D +eDim_nod2D) + real(kind=WP), INTENT(IN) :: field2d(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), INTENT(IN) :: mask(partit%myDim_nod2D +partit%eDim_nod2D) real(kind=WP) :: eff_vol_local(2) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" flux_local(1)=sum(lump2d_north*field2d(1:myDim_nod2D)*mask(1:myDim_nod2D)) flux_local(2)=sum(lump2d_south*field2d(1:myDim_nod2D)*mask(1:myDim_nod2D)) @@ -568,21 +684,22 @@ END SUBROUTINE integrate_2D !--------------------------------------------------------------------------------------------------- ! Receieve atmospheric net fluxes (atm_net_fluxes_north and atm_net_fluxes_south) ! -SUBROUTINE net_rec_from_atm(action) +SUBROUTINE net_rec_from_atm(action, partit) ! 
use g_forcing_arrays - use g_parsup use cpl_driver use o_PARAM, only: WP - + USE MOD_PARTIT + USE MOD_PARSUP IMPLICIT NONE - LOGICAL, INTENT (IN) :: action + LOGICAL, INTENT (IN) :: action + type(t_partit), intent(inout), target :: partit INTEGER :: my_global_rank, ierror INTEGER :: n - INTEGER :: status(MPI_STATUS_SIZE,npes) + INTEGER :: status(MPI_STATUS_SIZE,partit%npes) INTEGER :: request(2) - real(kind=WP) :: aux(nrecv) + real(kind=WP) :: aux(nrecv) #if defined (__oifs) return !OIFS-FESOM2 coupling uses OASIS3MCT conservative remapping and recieves no net fluxes here. #endif @@ -592,14 +709,14 @@ SUBROUTINE net_rec_from_atm(action) atm_net_fluxes_north=0. atm_net_fluxes_south=0. if (my_global_rank==target_root) then - CALL MPI_IRecv(atm_net_fluxes_north(1), nrecv, MPI_DOUBLE_PRECISION, source_root, 111, MPI_COMM_WORLD, request(1), MPIerr) - CALL MPI_IRecv(atm_net_fluxes_south(1), nrecv, MPI_DOUBLE_PRECISION, source_root, 112, MPI_COMM_WORLD, request(2), MPIerr) - CALL MPI_Waitall(2, request, status, MPIerr) + CALL MPI_IRecv(atm_net_fluxes_north(1), nrecv, MPI_DOUBLE_PRECISION, source_root, 111, MPI_COMM_WORLD, request(1), partit%MPIerr) + CALL MPI_IRecv(atm_net_fluxes_south(1), nrecv, MPI_DOUBLE_PRECISION, source_root, 112, MPI_COMM_WORLD, request(2), partit%MPIerr) + CALL MPI_Waitall(2, request, status, partit%MPIerr) end if - call MPI_Barrier(MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(atm_net_fluxes_north(1), aux, nrecv, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_Barrier(partit%MPI_COMM_FESOM, partit%MPIerr) + call MPI_AllREDUCE(atm_net_fluxes_north(1), aux, nrecv, MPI_DOUBLE_PRECISION, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) atm_net_fluxes_north=aux - call MPI_AllREDUCE(atm_net_fluxes_south(1), aux, nrecv, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(atm_net_fluxes_south(1), aux, nrecv, MPI_DOUBLE_PRECISION, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) atm_net_fluxes_south=aux end if END 
SUBROUTINE net_rec_from_atm diff --git a/src/gen_forcing_init.F90 b/src/gen_forcing_init.F90 index cb69dba6b..7d2df1954 100755 --- a/src/gen_forcing_init.F90 +++ b/src/gen_forcing_init.F90 @@ -1,8 +1,11 @@ module forcing_array_setup_interfaces interface - subroutine forcing_array_setup(mesh) + subroutine forcing_array_setup(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module @@ -11,40 +14,47 @@ subroutine forcing_array_setup(mesh) ! Added the driving routine forcing_setup. ! S.D 05.04.12 ! ========================================================== -subroutine forcing_setup(mesh) -use g_parsup +subroutine forcing_setup(partit, mesh) use g_CONFIG use g_sbf, only: sbc_ini use mod_mesh +USE MOD_PARTIT +USE MOD_PARSUP use forcing_array_setup_interfaces implicit none - type(t_mesh), intent(in) , target :: mesh - if (mype==0) write(*,*) '****************************************************' +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit + + if (partit%mype==0) write(*,*) '****************************************************' if (use_ice) then - call forcing_array_setup(mesh) + call forcing_array_setup(partit, mesh) #ifndef __oasis - call sbc_ini(mesh) ! initialize forcing fields + call sbc_ini(partit, mesh) ! initialize forcing fields #endif endif end subroutine forcing_setup ! 
========================================================== -subroutine forcing_array_setup(mesh) +subroutine forcing_array_setup(partit, mesh) !inializing forcing fields use o_param use mod_mesh - use i_arrays + USE MOD_PARTIT + USE MOD_PARSUP use g_forcing_arrays use g_forcing_param - use g_parsup use g_config use g_sbf, only: l_mslp, l_cloud #if defined (__oasis) use cpl_driver, only : nrecv #endif implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n2 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" n2=myDim_nod2D+eDim_nod2D ! Allocate memory for atmospheric forcing allocate(shortwave(n2), longwave(n2)) @@ -64,9 +74,13 @@ subroutine forcing_array_setup(mesh) evaporation = 0.0_WP ice_sublimation = 0.0_WP +#if defined (__oasis) || defined (__ifsinterface) + allocate(sublimation(n2), evap_no_ifrac(n2)) + sublimation=0.0_WP + evap_no_ifrac=0.0_WP +#endif #if defined (__oasis) allocate(tmp_sublimation(n2),tmp_evap_no_ifrac(n2), tmp_shortwave(n2)) - allocate(sublimation(n2),evap_no_ifrac(n2)) allocate(atm_net_fluxes_north(nrecv), atm_net_fluxes_south(nrecv)) allocate(oce_net_fluxes_north(nrecv), oce_net_fluxes_south(nrecv)) allocate(flux_correction_north(nrecv), flux_correction_south(nrecv)) @@ -81,11 +95,13 @@ subroutine forcing_array_setup(mesh) flux_correction_north=0.0_WP flux_correction_south=0.0_WP flux_correction_total=0.0_WP - evap_no_ifrac=0.0_WP - sublimation=0.0_WP + + allocate(residualifwflx(n2)) + residualifwflx = 0.0_WP #endif + ! Temp storage for averaging !!PS allocate(aver_temp(n2)) @@ -131,10 +147,11 @@ subroutine forcing_array_setup(mesh) !for ice diagnose if(use_ice) then - allocate(thdgr(n2), thdgrsn(n2), flice(n2)) +! 
allocate(thdgr(n2), thdgrsn(n2)) + allocate(flice(n2)) allocate(olat_heat(n2), osen_heat(n2), olwout(n2)) - thdgr=0.0_WP - thdgrsn=0.0_WP +! thdgr=0.0_WP +! thdgrsn=0.0_WP flice=0.0_WP olat_heat=0.0_WP osen_heat=0.0_WP diff --git a/src/gen_halo_exchange.F90 b/src/gen_halo_exchange.F90 index af1d29b84..c1dbd3eac 100755 --- a/src/gen_halo_exchange.F90 +++ b/src/gen_halo_exchange.F90 @@ -23,62 +23,65 @@ module g_comm contains #ifdef DEBUG - ! Only needed in debug mode - subroutine check_mpi_comm(rn, sn, r_mpitype, s_mpitype, rPE, sPE) - USE g_PARSUP - IMPLICIT NONE - - ! General version of the communication routine for 2D nodal fields - - integer, intent(in) :: sn, rn, r_mpitype(:), s_mpitype(:), rPE(:), sPE(:) - integer :: n, sdebug, rdebug, status(MPI_STATUS_SIZE), request - - DO n=1,rn - call MPI_TYPE_SIZE(r_mpitype(n), rdebug, MPIerr) - CALL MPI_ISEND(rdebug, 1, MPI_INTEGER, rPE(n), 10, MPI_COMM_FESOM, request, MPIerr) - END DO - - DO n=1, sn - call MPI_RECV(sdebug, 1, MPI_INTEGER, sPE(n), 10, MPI_COMM_FESOM, & - status, MPIerr) - call MPI_TYPE_SIZE(s_mpitype(n), rdebug, MPIerr) - if (sdebug /= rdebug) then - print *, "Mismatching MPI send/recieve message lengths." - print *,"Send/receive process numbers: ", mype, '/', sPE(n) - print *,"Number of send/receive bytes: ", sdebug, '/', rdebug - call MPI_ABORT( MPI_COMM_FESOM, 1 ) - end if - END DO - CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) - - END SUBROUTINE check_mpi_comm +! General version of the communication routine for 2D nodal fields +! 
Only needed in debug mode +subroutine check_mpi_comm(rn, sn, r_mpitype, s_mpitype, rPE, sPE, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(in) :: sn, rn, r_mpitype(:), s_mpitype(:), rPE(:), sPE(:) +integer :: n, sdebug, rdebug, status(MPI_STATUS_SIZE), request +#include "associate_part_def.h" +#include "associate_part_ass.h" +DO n=1,rn + CALL MPI_TYPE_SIZE(r_mpitype(n), rdebug, MPIerr) + CALL MPI_ISEND(rdebug, 1, MPI_INTEGER, rPE(n), 10, MPI_COMM_FESOM, request, MPIerr) +END DO +DO n=1, sn + call MPI_RECV(sdebug, 1, MPI_INTEGER, sPE(n), 10, MPI_COMM_FESOM, & + status, MPIerr) + call MPI_TYPE_SIZE(s_mpitype(n), rdebug, MPIerr) + if (sdebug /= rdebug) then + print *, "Mismatching MPI send/recieve message lengths." + print *,"Send/receive process numbers: ", mype, '/', sPE(n) + print *,"Number of send/receive bytes: ", sdebug, '/', rdebug + call MPI_ABORT( MPI_COMM_FESOM, 1 ) + end if +END DO +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +END SUBROUTINE check_mpi_comm #endif -subroutine exchange_nod2D_i(nod_array2D) - -USE g_PARSUP +subroutine exchange_nod2D_i(nod_array2D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - - integer, intent(inout) :: nod_array2D(:) - - if (npes > 1) then - call exchange_nod2D_i_begin(nod_array2D) - call exchange_nod_end +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: nod_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" +if (npes > 1) then + call exchange_nod2D_i_begin(nod_array2D, partit) + call exchange_nod_end(partit) endif END SUBROUTINE exchange_nod2D_i !============================================================================= - -subroutine exchange_nod2D_i_begin(nod_array2D) - USE o_MESH - USE g_PARSUP - IMPLICIT NONE - - ! General version of the communication routine for 2D nodal fields - - integer, intent(inout) :: nod_array2D(:) - integer :: n, sn, rn +! 
General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_i_begin(nod_array2D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: nod_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then @@ -109,33 +112,36 @@ subroutine exchange_nod2D_i_begin(nod_array2D) END SUBROUTINE exchange_nod2D_i_begin ! ======================================================================== -subroutine exchange_nod2D(nod_array2D) - -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod_array2D(:) +subroutine exchange_nod2D(nod_array2D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then - call exchange_nod2D_begin(nod_array2D) - call exchange_nod_end + call exchange_nod2D_begin(nod_array2D, partit) + call exchange_nod_end(partit) end if END SUBROUTINE exchange_nod2D ! ======================================================================== -subroutine exchange_nod2D_begin(nod_array2D) - USE o_MESH - USE g_PARSUP - IMPLICIT NONE - - ! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod_array2D(:) - - integer :: n, sn, rn +! 
General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_begin(nod_array2D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then @@ -163,37 +169,41 @@ subroutine exchange_nod2D_begin(nod_array2D) END SUBROUTINE exchange_nod2D_begin !=============================================== -subroutine exchange_nod2D_2fields(nod1_array2D, nod2_array2D) - -USE g_PARSUP +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_2fields(nod1_array2D, nod2_array2D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" -! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod1_array2D(:) - real(real64), intent(inout) :: nod2_array2D(:) if (npes > 1) then - call exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D) - call exchange_nod_end + call exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D, partit) + call exchange_nod_end(partit) end if END SUBROUTINE exchange_nod2D_2fields ! ======================================================================== -subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D) -USE o_MESH -USE g_PARSUP -IMPLICIT NONE - ! 
General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod1_array2D(:) - real(real64), intent(inout) :: nod2_array2D(:) - - integer :: n, sn, rn +subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" - if (npes > 1) then +if (npes > 1) then sn=com_nod2D%sPEnum rn=com_nod2D%rPEnum @@ -226,38 +236,41 @@ subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D) END SUBROUTINE exchange_nod2D_2fields_begin !=============================================== -subroutine exchange_nod2D_3fields(nod1_array2D, nod2_array2D, nod3_array2D) - -USE g_PARSUP +subroutine exchange_nod2D_3fields(nod1_array2D, nod2_array2D, nod3_array2D, partit) +! General version of the communication routine for 2D nodal fields +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +real(real64), intent(inout) :: nod3_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" -! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod1_array2D(:) - real(real64), intent(inout) :: nod2_array2D(:) - real(real64), intent(inout) :: nod3_array2D(:) if (npes > 1) then - call exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D) - call exchange_nod_end + call exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D, partit) + call exchange_nod_end(partit) end if END SUBROUTINE exchange_nod2D_3fields ! 
======================================================================== -subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D) -USE o_MESH -USE g_PARSUP -IMPLICIT NONE - +subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D, partit) ! General version of the communication routine for 2D nodal fields - - real(real64), intent(inout) :: nod1_array2D(:) - real(real64), intent(inout) :: nod2_array2D(:) - real(real64), intent(inout) :: nod3_array2D(:) - - - integer :: n, sn, rn +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +real(real64), intent(inout) :: nod3_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then @@ -298,34 +311,37 @@ subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D END SUBROUTINE exchange_nod2D_3fields_begin ! ======================================================================== -subroutine exchange_nod3D(nod_array3D) - -USE g_PARSUP -IMPLICIT NONE - -real(real64), intent(inout) :: nod_array3D(:,:) ! General version of the communication routine for 3D nodal fields ! stored in (vertical, horizontal) format - -if (npes > 1) then - call exchange_nod3D_begin(nod_array3D) - call exchange_nod_end +subroutine exchange_nod3D(nod_array3D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:) + +if (partit%npes > 1) then + call exchange_nod3D_begin(nod_array3D, partit) + call exchange_nod_end(partit) endif + END SUBROUTINE exchange_nod3D ! 
======================================================================== -subroutine exchange_nod3D_begin(nod_array3D) -USE o_MESH -USE g_PARSUP -IMPLICIT NONE - - -real(real64), intent(inout) :: nod_array3D(:,:) ! General version of the communication routine for 3D nodal fields ! stored in (vertical, horizontal) format - - integer :: n, sn, rn - integer :: nz, nl1 +subroutine exchange_nod3D_begin(nod_array3D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:) +integer :: n, sn, rn +integer :: nz, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then sn=com_nod2D%sPEnum @@ -338,7 +354,7 @@ subroutine exchange_nod3D_begin(nod_array3D) print *,'Subroutine exchange_nod3D not implemented for',nl1,'layers.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif ! Check MPI point-to-point communication for consistency @@ -346,53 +362,54 @@ subroutine exchange_nod3D_begin(nod_array3D) call check_mpi_comm(rn, sn, r_mpitype_nod3D(:,nl1,1), s_mpitype_nod3D(:,nl1,1), & com_nod2D%rPE, com_nod2D%sPE) #endif - DO n=1,rn call MPI_IRECV(nod_array3D, 1, r_mpitype_nod3D(n,nl1,1), com_nod2D%rPE(n), & com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(n), MPIerr) END DO - DO n=1, sn call MPI_ISEND(nod_array3D, 1, s_mpitype_nod3D(n,nl1,1), com_nod2D%sPE(n), & mype, MPI_COMM_FESOM, com_nod2D%req(rn+n), MPIerr) END DO - com_nod2D%nreq = rn+sn endif END SUBROUTINE exchange_nod3D_begin ! ======================================================================== -subroutine exchange_nod3D_2fields(nod1_array3D,nod2_array3D) - -USE g_PARSUP -IMPLICIT NONE - -real(real64), intent(inout) :: nod1_array3D(:,:) -real(real64), intent(inout) :: nod2_array3D(:,:) ! General version of the communication routine for 3D nodal fields ! 
stored in (vertical, horizontal) format - +subroutine exchange_nod3D_2fields(nod1_array3D,nod2_array3D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array3D(:,:) +real(real64), intent(inout) :: nod2_array3D(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + if (npes > 1) then - call exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D) - call exchange_nod_end + call exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) + call exchange_nod_end(partit) endif END SUBROUTINE exchange_nod3D_2fields ! ======================================================================== -subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D) -USE o_MESH -USE g_PARSUP -IMPLICIT NONE - - -real(real64), intent(inout) :: nod1_array3D(:,:) -real(real64), intent(inout) :: nod2_array3D(:,:) +subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) ! General version of the communication routine for 3D nodal fields ! stored in (vertical, horizontal) format - - integer :: n, sn, rn - integer :: nz, nl1, nl2 +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array3D(:,:) +real(real64), intent(inout) :: nod2_array3D(:,:) +integer :: n, sn, rn +integer :: nz, nl1, nl2 +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then sn=com_nod2D%sPEnum @@ -405,7 +422,7 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D) print *,'Subroutine exchange_nod3D not implemented for',nl1,'layers.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' 
endif - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif nl2 = ubound(nod2_array3D,1) @@ -414,7 +431,7 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D) print *,'Subroutine exchange_nod3D not implemented for',nl2,'layers.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif #ifdef DEBUG @@ -443,41 +460,39 @@ subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D) endif END SUBROUTINE exchange_nod3D_2fields_begin ! ======================================================================== -subroutine exchange_nod3D_n(nod_array3D) -USE o_MESH -USE g_PARSUP +subroutine exchange_nod3D_n(nod_array3D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - -real(real64), intent(inout) :: nod_array3D(:,:,:) - -if (npes>1) then - call exchange_nod3D_n_begin(nod_array3D) - call exchange_nod_end +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:,:) +if (partit%npes>1) then + call exchange_nod3D_n_begin(nod_array3D, partit) + call exchange_nod_end(partit) endif END SUBROUTINE exchange_nod3D_n !================================================= - -subroutine exchange_nod3D_n_begin(nod_array3D) -USE o_MESH -USE g_PARSUP -IMPLICIT NONE - -real(real64), intent(inout) :: nod_array3D(:,:,:) ! General version of the communication routine for 3D nodal fields ! stored in (vertical, horizontal) format - - integer :: n, sn, rn - integer :: nz, nl1, n_val - +subroutine exchange_nod3D_n_begin(nod_array3D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:,:) +integer :: n, sn, rn +integer :: nz, nl1, n_val +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes>1) then ! 
nod_array3D(n_val,nl1,nod2D_size) - nl1= ubound(nod_array3D,2) + nl1 = ubound(nod_array3D,2) n_val = ubound(nod_array3D,1) - if ((nl1ubound(r_mpitype_nod3D, 2)) .or. (n_val > 3)) then - ! This routine also works for swapped dimensions nod_array3D(nl1,n_val, nod2D_size) nl1 = ubound(nod_array3D,1) n_val = ubound(nod_array3D,2) @@ -488,7 +503,7 @@ subroutine exchange_nod3D_n_begin(nod_array3D) print *,nl1,'layers and / or ',n_val,'values per element.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif endif sn=com_nod2D%sPEnum @@ -521,17 +536,26 @@ END SUBROUTINE exchange_nod3D_n_begin ! AND WAITING !======================================= -SUBROUTINE exchange_nod_end - USE g_PARSUP +SUBROUTINE exchange_nod_end(partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit -if (npes > 1) & - call MPI_WAITALL(com_nod2D%nreq, com_nod2D%req, MPI_STATUSES_IGNORE, MPIerr) +if (partit%npes > 1) & + call MPI_WAITALL(partit%com_nod2D%nreq, partit%com_nod2D%req, MPI_STATUSES_IGNORE, partit%MPIerr) END SUBROUTINE exchange_nod_end -SUBROUTINE exchange_elem_end - - USE g_PARSUP +SUBROUTINE exchange_elem_end(partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes > 1) then if (elem_full_flag) then @@ -543,132 +567,34 @@ SUBROUTINE exchange_elem_end endif end if END SUBROUTINE exchange_elem_end -! ======================================================================== - -!nr Not used, no MPI datatype built (yet) -! -!!$subroutine exchange_edge3D(edge_array3D) -!!$ use o_MESH -!!$ use g_PARSUP -!!$ implicit none -!!$ -!!$ ! 
Communication of edge based data stored in (vertical, horizontal) format -!!$ -!!$ INTEGER :: sreq(maxPEnum) -!!$ INTEGER :: rreq(maxPEnum) -!!$ INTEGER :: sstat(MPI_STATUS_SIZE,maxPEnum) -!!$ INTEGER :: rstat(MPI_STATUS_SIZE,maxPEnum) -!!$ integer :: n, sn, rn, dest, nini, nend, offset, source,tag -!!$ integer :: nz, nh, nc -!!$ real(real64) :: edge_array3D(nl-1,edge2D) -!!$ -!!$ sn=com_edge2D%sPEnum -!!$ rn=com_edge2D%rPEnum -!!$ ! Put data to be communicated into send buffer -!!$ -!!$ -!!$ do n=1, sn -!!$ nini=com_edge2D%sptr(n) -!!$ nend=com_edge2D%sptr(n+1) - 1 -!!$ nc=0 -!!$ DO nh=nini, nend -!!$ DO nz=1, nl-1 -!!$ nc=nc+1 -!!$ s_buff_edge3D(n)%array(nc)=edge_array3D(nz,com_edge2D%slist(nh)) -!!$ END DO -!!$ END DO -!!$ end do -!!$ -!!$ -!!$ do n=1, sn -!!$ dest=com_edge2D%sPE(n) -!!$ nini=com_edge2D%sptr(n) -!!$ offset=(com_edge2D%sptr(n+1) - nini)*(nl-1) -!!$ -!!$ call MPI_ISEND(s_buff_edge3D(n)%array, offset, MPI_DOUBLE_PRECISION, dest, mype, & -!!$ MPI_COMM_FESOM, sreq(n), MPIerr) -!!$ end do -!!$ do n=1, rn -!!$ source=com_edge2D%rPE(n) -!!$ nini=com_edge2D%rptr(n) -!!$ offset=(com_edge2D%rptr(n+1) - nini)*(nl-1) -!!$ -!!$ call MPI_IRECV(r_buff_edge3D(n)%array, offset, MPI_DOUBLE_PRECISION, source, & -!!$ source, MPI_COMM_FESOM, rreq(n), MPIerr) -!!$ end do -!!$ -!!$ call MPI_WAITALL(sn,sreq,sstat, MPIerr) -!!$ call MPI_WAITALL(rn,rreq,rstat, MPIerr) -!!$ -!!$ ! Put received data to their destination -!!$ -!!$ do n=1, rn -!!$ nini=com_edge2D%rptr(n) -!!$ nend=com_edge2D%rptr(n+1) - 1 -!!$ nc=0 -!!$ DO nh=nini, nend -!!$ DO nz=1, nl-1 -!!$ nc=nc+1 -!!$ edge_array3D(nz,com_edge2D%rlist(nh))=r_buff_edge3D(n)%array(nc) -!!$ END DO -!!$ END DO -!!$ end do -!!$ -!!$end subroutine exchange_edge3D -!========================================================================== - -!!$subroutine exchange_edge2D(edge_array2D) -!!$ use o_MESH -!!$ use g_PARSUP -!!$ implicit none -!!$ -!!$! General version of the communication routine for 2D edge fields -!!$! 
This routine is not split, it is used only once during setup. -!!$ real(real64), intent(inout) :: edge_array2D(:) -!!$ -!!$ integer :: n, sn, rn -!!$ -!!$ if (npes> 1) then -!!$ sn=com_edge2D%sPEnum -!!$ rn=com_edge2D%rPEnum -!!$ -!!$ DO n=1,rn -!!$ call MPI_IRECV(edge_array2D, 1, r_mpitype_edge2D(n), com_edge2D%rPE(n), & -!!$ com_edge2D%rPE(n), MPI_COMM_FESOM, com_edge2D%req(n), MPIerr) -!!$ END DO -!!$ DO n=1, sn -!!$ call MPI_ISEND(edge_array2D, 1, s_mpitype_edge2D(n), com_edge2D%sPE(n), & -!!$ mype, MPI_COMM_FESOM, com_edge2D%req(rn+n), MPIerr) -!!$ END DO -!!$ -!!$ call MPI_WAITALL(rn+sn,com_edge2D%req,MPI_STATUSES_IGNORE, MPIerr) -!!$ -!!$ endif -!!$ -!!$end subroutine exchange_edge2D !============================================================================= -subroutine exchange_elem3D(elem_array3D) - -USE g_PARSUP +subroutine exchange_elem3D(elem_array3D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" - real(real64), intent(inout) :: elem_array3D(:,:) - - call exchange_elem3D_begin(elem_array3D) - call exchange_elem_end +call exchange_elem3D_begin(elem_array3D, partit) +call exchange_elem_end(partit) END SUBROUTINE exchange_elem3D !=========================================== -subroutine exchange_elem3D_begin(elem_array3D) -USE o_MESH -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 3D elemental fields ! 
stored in (vertical, horizontal) format - -real(real64), intent(inout) :: elem_array3D(:,:) -integer :: n, sn, rn, nl1 +subroutine exchange_elem3D_begin(elem_array3D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:) +integer :: n, sn, rn, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then @@ -721,7 +647,7 @@ subroutine exchange_elem3D_begin(elem_array3D) END DO else if (mype==0) print *,'Sorry, no MPI datatype prepared for',nl1,'values per element (exchange_elem3D)' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif com_elem2D%nreq = rn+sn @@ -774,7 +700,7 @@ subroutine exchange_elem3D_begin(elem_array3D) END DO else if (mype==0) print *,'Sorry, no MPI datatype prepared for',nl1,'values per element (exchange_elem3D)' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif com_elem2D_full%nreq = rn+sn @@ -786,32 +712,36 @@ subroutine exchange_elem3D_begin(elem_array3D) END SUBROUTINE exchange_elem3D_begin !============================================================================= -subroutine exchange_elem3D_n(elem_array3D) -USE o_MESH -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 3D elemental fields ! 
stored in (vertical, horizontal) format - - real(real64), intent(inout) :: elem_array3D(:,:,:) +subroutine exchange_elem3D_n(elem_array3D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then - call exchange_elem3D_n_begin(elem_array3D) - call exchange_elem_end + call exchange_elem3D_n_begin(elem_array3D, partit) + call exchange_elem_end(partit) endif END SUBROUTINE exchange_elem3D_n !============================================================================= -subroutine exchange_elem3D_n_begin(elem_array3D) -USE o_MESH -USE g_PARSUP -IMPLICIT NONE - +subroutine exchange_elem3D_n_begin(elem_array3D, partit) ! General version of the communication routine for 3D elemental fields ! stored in (vertical, horizontal) format - - real(real64), intent(inout) :: elem_array3D(:,:,:) - integer :: n, sn, rn, n_val, nl1 +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:,:) +integer :: n, sn, rn, n_val, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then nl1 = ubound(elem_array3D,2) @@ -829,7 +759,7 @@ subroutine exchange_elem3D_n_begin(elem_array3D) print *,nl1,'layers and / or ',n_val,'values per element.' print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' endif - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) endif endif @@ -887,35 +817,39 @@ subroutine exchange_elem3D_n_begin(elem_array3D) endif END SUBROUTINE exchange_elem3D_n_begin !======================================================================== -subroutine exchange_elem2D(elem_array2D) -USE o_MESH -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 3D elemental fields ! 
stored in (vertical, horizontal) format - - real(real64), intent(inout) :: elem_array2D(:) +subroutine exchange_elem2D(elem_array2D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then - call exchange_elem2D_begin(elem_array2D) - call exchange_elem_end + call exchange_elem2D_begin(elem_array2D, partit) + call exchange_elem_end(partit) end if END SUBROUTINE exchange_elem2D !======================================================================== -subroutine exchange_elem2D_begin(elem_array2D) -USE o_MESH -USE g_PARSUP -IMPLICIT NONE - ! General version of the communication routine for 3D elemental fields ! stored in (vertical, horizontal) format - - real(real64), intent(inout) :: elem_array2D(:) - integer :: n, sn, rn +subroutine exchange_elem2D_begin(elem_array2D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" - if (npes> 1) then +if (npes> 1) then if (ubound(elem_array2D,1)<=myDim_elem2D+eDim_elem2D) then @@ -970,32 +904,36 @@ subroutine exchange_elem2D_begin(elem_array2D) END SUBROUTINE exchange_elem2D_begin ! ======================================================================== -subroutine exchange_elem2D_i(elem_array2D) !Exchange with ALL(!) 
the neighbours -USE o_MESH -USE g_PARSUP +subroutine exchange_elem2D_i(elem_array2D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - - integer, intent(inout) :: elem_array2D(:) - - integer :: n, sn, rn +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then - call exchange_elem2D_i_begin(elem_array2D) - call exchange_elem_end + call exchange_elem2D_i_begin(elem_array2D, partit) + call exchange_elem_end(partit) end if END SUBROUTINE exchange_elem2D_i !============================================================================= -subroutine exchange_elem2D_i_begin(elem_array2D) !Exchange with ALL(!) the neighbours -USE o_MESH -USE g_PARSUP +subroutine exchange_elem2D_i_begin(elem_array2D, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - - integer, intent(inout) :: elem_array2D(:) - - integer :: n, sn, rn +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then @@ -1026,28 +964,26 @@ subroutine exchange_elem2D_i_begin(elem_array2D) end if END SUBROUTINE exchange_elem2D_i_begin -!============================================================================= - - - ! ======================================================================== ! Broadcast routines ! Many because of different sizes. ! ======================================================================== -subroutine broadcast_nod3D(arr3D, arr3Dglobal) +subroutine broadcast_nod3D(arr3D, arr3Dglobal, partit) ! 
Distribute the nodal information available on 0 PE to other PEs -use g_PARSUP -USE o_MESH - +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - -INTEGER :: nz, counter,nl1 -integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf -real(real64) :: arr3D(:,:) -real(real64) :: arr3Dglobal(:,:) +type(t_partit), intent(inout), target :: partit +INTEGER :: nz, counter,nl1 +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr3D(:,:) +real(real64) :: arr3Dglobal(:,:) +integer :: node_size +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf, recvbuf -integer :: node_size +#include "associate_part_def.h" +#include "associate_part_ass.h" node_size=myDim_nod2D+eDim_nod2D nl1=ubound(arr3D,1) @@ -1099,19 +1035,21 @@ end subroutine broadcast_nod3D ! !============================================================================ ! -subroutine broadcast_nod2D(arr2D, arr2Dglobal) +subroutine broadcast_nod2D(arr2D, arr2Dglobal, partit) ! A 2D version of the previous routine -use g_PARSUP -USE o_MESH +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - -real(real64) :: arr2D(:) -real(real64) :: arr2Dglobal(:) - -integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf -real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf -integer :: node_size +type(t_partit), intent(in), target :: partit +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf +integer :: node_size +#include "associate_part_def.h" +#include "associate_part_ass.h" node_size=myDim_nod2D+eDim_nod2D @@ -1148,20 +1086,23 @@ end subroutine broadcast_nod2D ! !============================================================================ ! 
-subroutine broadcast_elem3D(arr3D, arr3Dglobal) +subroutine broadcast_elem3D(arr3D, arr3Dglobal, partit) ! Distribute the elemental information available on 0 PE to other PEs -use g_PARSUP -USE o_MESH - +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - -INTEGER :: nz, counter,nl1 -integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf -real(real64) :: arr3D(:,:) -real(real64) :: arr3Dglobal(:,:) +type(t_partit), intent(in), target :: partit +INTEGER :: nz, counter,nl1 +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr3D(:,:) +real(real64) :: arr3Dglobal(:,:) +integer :: elem_size + +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf, recvbuf -integer :: elem_size +#include "associate_part_def.h" +#include "associate_part_ass.h" elem_size=myDim_elem2D+eDim_elem2D @@ -1214,24 +1155,24 @@ end subroutine broadcast_elem3D ! !============================================================================ ! -subroutine broadcast_elem2D(arr2D, arr2Dglobal) +subroutine broadcast_elem2D(arr2D, arr2Dglobal, partit) ! 
A 2D version of the previous routine -use g_PARSUP -USE o_MESH +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - -integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf - -real(real64) :: arr2D(:) -real(real64) :: arr2Dglobal(:) -real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf -integer :: elem_size +type(t_partit), intent(in), target :: partit +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: elem_size +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" elem_size=myDim_elem2D+eDim_elem2D - - IF ( mype == 0 ) THEN if (npes>1) then arr2D(1:elem_size)=arr2Dglobal(myList_elem2D(1:elem_size)) @@ -1264,29 +1205,25 @@ subroutine broadcast_elem2D(arr2D, arr2Dglobal) end subroutine broadcast_elem2D ! !============================================================================ -! -subroutine gather_nod3D(arr3D, arr3D_global) - ! Make nodal information available to master PE -! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP -USE o_MESH - - +subroutine gather_nod3D(arr3D, arr3D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real64) :: arr3D_global(:,:) +real(real64), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -real(real64) :: arr3D(:,:) -real(real64) :: arr3D_global(:,:) -real(real64), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, n3D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1328,28 +1265,27 @@ end subroutine gather_nod3D ! 
!============================================================================ ! -subroutine gather_real4_nod3D(arr3D, arr3D_global) +subroutine gather_real4_nod3D(arr3D, arr3D_global, partit) ! Make nodal information available to master PE ! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP -USE o_MESH - - +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real32) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +real(real32), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -real(real32) :: arr3D(:,:) -real(real32) :: arr3D_global(:,:) -real(real32), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, n3D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1390,27 +1326,28 @@ subroutine gather_real4_nod3D(arr3D, arr3D_global) end subroutine gather_real4_nod3D !======================================================= -subroutine gather_int2_nod3D(arr3D, arr3D_global) +subroutine gather_int2_nod3D(arr3D, arr3D_global, partit) ! Make nodal information available to master PE ! ! 
Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP -USE o_MESH - +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +integer(int16) :: arr3D(:,:) +integer(int16) :: arr3D_global(:,:) +integer(int16), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -integer(int16) :: arr3D(:,:) -integer(int16) :: arr3D_global(:,:) -integer(int16), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, n3D - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1450,24 +1387,23 @@ subroutine gather_int2_nod3D(arr3D, arr3D_global) end if end subroutine gather_int2_nod3D !============================================== -subroutine gather_nod2D(arr2D, arr2D_global) - +subroutine gather_nod2D(arr2D, arr2D_global, partit) ! Make nodal information available to master PE - -use g_PARSUP -USE o_MESH - +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real64) :: arr2D_global(:) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -real(real64) :: arr2D(:) -real(real64) :: arr2D_global(:) -real(real64), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, n2D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) @@ -1505,24 +1441,23 @@ subroutine gather_nod2D(arr2D, arr2D_global) endif end subroutine gather_nod2D !============================================== -subroutine gather_real4_nod2D(arr2D, arr2D_global) - +subroutine gather_real4_nod2D(arr2D, arr2D_global, partit) ! 
Make nodal information available to master PE - -use g_PARSUP -USE o_MESH - +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real32) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -real(real32) :: arr2D(:) -real(real32) :: arr2D_global(:) -real(real32), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, n2D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) @@ -1561,24 +1496,23 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global) end subroutine gather_real4_nod2D !============================================== -subroutine gather_int2_nod2D(arr2D, arr2D_global) - -! Make nodal information available to master PE - -use g_PARSUP -USE o_MESH - +! Make nodal information available to master PE +subroutine gather_int2_nod2D(arr2D, arr2D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer(int16) :: arr2D(:) +integer(int16) :: arr2D_global(:) +integer(int16), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -integer(int16) :: arr2D(:) -integer(int16) :: arr2D_global(:) -integer(int16), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, n2D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) @@ -1617,29 +1551,27 @@ subroutine gather_int2_nod2D(arr2D, arr2D_global) end subroutine gather_int2_nod2D !============================================================================ -subroutine gather_elem3D(arr3D, arr3D_global) - +subroutine gather_elem3D(arr3D, arr3D_global, partit) ! Make element information available to master PE ! 
! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP -USE o_MESH - - +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real64) :: arr3D_global(:,:) +real(real64), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -real(real64) :: arr3D(:,:) -real(real64) :: arr3D_global(:,:) -real(real64), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, e3D, ende, err_alloc -integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1684,30 +1616,26 @@ subroutine gather_elem3D(arr3D, arr3D_global) end subroutine gather_elem3D !=================================================================== - -subroutine gather_real4_elem3D(arr3D, arr3D_global) - ! Make element information available to master PE -! ! 
Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP -USE o_MESH - - +subroutine gather_real4_elem3D(arr3D, arr3D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real32) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +real(real32), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -real(real32) :: arr3D(:,:) -real(real32) :: arr3D_global(:,:) -real(real32), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, e3D, ende, err_alloc -integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1753,30 +1681,26 @@ end subroutine gather_real4_elem3D !=================================================================== - -subroutine gather_int2_elem3D(arr3D, arr3D_global) - ! Make element information available to master PE -! ! 
Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP -USE o_MESH - - +subroutine gather_int2_elem3D(arr3D, arr3D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +integer(int16) :: arr3D(:,:) +integer(int16) :: arr3D_global(:,:) +integer(int16), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -integer(int16) :: arr3D(:,:) -integer(int16) :: arr3D_global(:,:) -integer(int16), allocatable :: recvbuf(:,:) -integer :: req(npes-1) -integer :: start, e3D, ende, err_alloc -integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -1822,25 +1746,23 @@ end subroutine gather_int2_elem3D !============================================== -subroutine gather_elem2D(arr2D, arr2D_global) - ! Make element information available to master PE - -use g_PARSUP -USE o_MESH - +subroutine gather_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real64) :: arr2D_global(:) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -real(real64) :: arr2D(:) -real(real64) :: arr2D_global(:) -real(real64), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, e2D - - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) ! Consider MPI-datatypes to recv directly into arr2D_global! 
@@ -1881,26 +1803,25 @@ subroutine gather_elem2D(arr2D, arr2D_global) end subroutine gather_elem2D -!============================================== -subroutine gather_real4_elem2D(arr2D, arr2D_global) - +!================================================ ! Make element information available to master PE - -use g_PARSUP -USE o_MESH - +subroutine gather_real4_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real32) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -real(real32) :: arr2D(:) -real(real32) :: arr2D_global(:) -real(real32), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, e2D - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) ! Consider MPI-datatypes to recv directly into arr2D_global! @@ -1941,26 +1862,24 @@ subroutine gather_real4_elem2D(arr2D, arr2D_global) end subroutine gather_real4_elem2D -!============================================== -subroutine gather_int2_elem2D(arr2D, arr2D_global) - +!================================================ ! 
Make element information available to master PE - -use g_PARSUP -USE o_MESH - +subroutine gather_int2_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer(int16) :: arr2D(:) +integer(int16) :: arr2D_global(:) +integer(int16), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -integer(int16) :: arr2D(:) -integer(int16) :: arr2D_global(:) -integer(int16), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, e2D - - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) ! Consider MPI-datatypes to recv directly into arr2D_global! @@ -2003,29 +1922,26 @@ end subroutine gather_int2_elem2D !============================================================================ -subroutine gather_real8to4_nod3D(arr3D, arr3D_global) - ! Make nodal information available to master PE -! ! 
Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP -USE o_MESH - - +subroutine gather_real8to4_nod3D(arr3D, arr3D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D, ierr +real(real32), allocatable :: recvbuf(:,:) +real(real32), allocatable :: sendbuf(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n - -real(real64) :: arr3D(:,:) -real(real32) :: arr3D_global(:,:) -real(real32), allocatable :: recvbuf(:,:) -real(real32), allocatable :: sendbuf(:,:) -integer :: req(npes-1) -integer :: start, n3D, ierr - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -2070,23 +1986,22 @@ subroutine gather_real8to4_nod3D(arr3D, arr3D_global) end subroutine gather_real8to4_nod3D !============================================== -subroutine gather_real8to4_nod2D(arr2D, arr2D_global) - ! Make nodal information available to master PE - -use g_PARSUP -USE o_MESH - +subroutine gather_real8to4_nod2D(arr2D, arr2D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - -integer :: n - -real(real64) :: arr2D(:) -real(real32) :: arr2D_global(:) -real(real32) :: sendbuf(myDim_nod2D) -real(real64), allocatable :: recvbuf(:) -integer :: req(npes-1) -integer :: start, n2D +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32) :: sendbuf(partit%myDim_nod2D) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" ! Consider MPI-datatypes to recv directly into arr2D_global! 
@@ -2124,32 +2039,28 @@ subroutine gather_real8to4_nod2D(arr2D, arr2D_global) end if end subroutine gather_real8to4_nod2D -!============================================== !============================================================================ -subroutine gather_real8to4_elem3D(arr3D, arr3D_global) - +subroutine gather_real8to4_elem3D(arr3D, arr3D_global, partit) ! Make element information available to master PE -! ! Use only with 3D arrays stored in (vertical, horizontal) way - -use g_PARSUP -USE o_MESH - - +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D +real(real32), allocatable :: recvbuf(:,:) +real(real32), allocatable :: sendbuf(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" -INTEGER :: nl1 -integer :: n -real(real64) :: arr3D(:,:) -real(real32) :: arr3D_global(:,:) -real(real32), allocatable :: recvbuf(:,:) -real(real32), allocatable :: sendbuf(:,:) -integer :: req(npes-1) -integer :: start, e3D - - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) nl1=ubound(arr3D,1) @@ -2189,27 +2100,25 @@ subroutine gather_real8to4_elem3D(arr3D, arr3D_global) end if end subroutine gather_real8to4_elem3D -!============================================== -subroutine gather_real8to4_elem2D(arr2D, arr2D_global) - +!================================================ ! 
Make element information available to master PE - -use g_PARSUP -USE o_MESH - +subroutine gather_real8to4_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +real(real32) :: sendbuf(partit%myDim_elem2D) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" -integer :: n - -real(real64) :: arr2D(:) -real(real32) :: arr2D_global(:) -real(real32), allocatable :: recvbuf(:) -real(real32) :: sendbuf(myDim_elem2D) -integer :: req(npes-1) -integer :: start, e2D - - - if (npes> 1) then +if (npes> 1) then CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) ! Consider MPI-datatypes to recv directly into arr2D_global! @@ -2250,19 +2159,24 @@ subroutine gather_real8to4_elem2D(arr2D, arr2D_global) end if end subroutine gather_real8to4_elem2D !============================================== -subroutine gather_elem2D_i(arr2D, arr2D_global) +subroutine gather_elem2D_i(arr2D, arr2D_global, partit) ! Make element information available to master PE - use g_PARSUP - use o_MESH - IMPLICIT NONE - - integer :: n - integer :: arr2D(:) - integer :: arr2D_global(:) - integer, allocatable :: recvbuf(:) - integer :: req(npes-1) - integer :: start, e2D - CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer :: arr2D(:) +integer :: arr2D_global(:) +integer, allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) ! Consider MPI-datatypes to recv directly into arr2D_global! 
IF ( mype == 0 ) THEN if (npes > 1) then @@ -2284,22 +2198,22 @@ subroutine gather_elem2D_i(arr2D, arr2D_global) call MPI_SEND(arr2D, myDim_elem2D, MPI_INTEGER, 0, 2, MPI_COMM_FESOM, MPIerr ) ENDIF end subroutine gather_elem2D_i -!============================================================================ -subroutine gather_nod2D_i(arr2D, arr2D_global) - -! Make nodal information available to master PE - -use g_PARSUP -USE o_MESH - +!============================================== +! Make nodal information available to master PE +subroutine gather_nod2D_i(arr2D, arr2D_global, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - +type(t_partit), intent(inout), target :: partit integer :: n integer :: arr2D(:) integer :: arr2D_global(:) integer, allocatable :: recvbuf(:) -integer :: req(npes-1) +integer :: req(partit%npes-1) integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" if (npes> 1) then @@ -2339,19 +2253,20 @@ subroutine gather_nod2D_i(arr2D, arr2D_global) endif end subroutine gather_nod2D_i !============================================================================ -! -subroutine gather_edg2D(arr2D, arr2Dglobal) ! 
A 2D version of the previous routine -use g_PARSUP -USE o_MESH +subroutine gather_edg2D(arr2D, arr2Dglobal, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - -real(real64) :: arr2D(:) -real(real64) :: arr2Dglobal(:) - -integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf +type(t_partit), intent(in), target :: partit +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf REAL(real64), ALLOCATABLE, DIMENSION(:) :: rbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" IF ( mype == 0 ) THEN arr2Dglobal(myList_edge2D(1:myDim_edge2D))=arr2D(1:myDim_edge2D) @@ -2380,18 +2295,19 @@ subroutine gather_edg2D(arr2D, arr2Dglobal) end subroutine gather_edg2D ! !============================================================================ -! -subroutine gather_edg2D_i(arr2D, arr2Dglobal) ! A 2D version of the previous routine -use g_PARSUP -USE o_MESH +subroutine gather_edg2D_i(arr2D, arr2Dglobal, partit) +use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE - -integer :: arr2D(:) -integer :: arr2Dglobal(:) - -integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) -INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf, vbuf +type(t_partit), intent(inout), target :: partit +integer :: arr2D(:) +integer :: arr2Dglobal(:) +integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf, vbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" IF ( mype == 0 ) THEN arr2Dglobal(myList_edge2D(1:myDim_edge2D))=arr2D(1:myDim_edge2D) diff --git a/src/gen_ic3d.F90 b/src/gen_ic3d.F90 index 5bbce229d..f3fa32d34 100644 --- a/src/gen_ic3d.F90 +++ b/src/gen_ic3d.F90 @@ -13,8 +13,10 @@ MODULE g_ic3d !! 
USE o_ARRAYS USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER USE o_PARAM - USE g_PARSUP USE g_comm_auto USE g_support USE g_config, only: dummy, ClimateDataPath, use_cavity @@ -24,7 +26,7 @@ MODULE g_ic3d include 'netcdf.inc' public do_ic3d, & ! read and apply 3D initial conditions - n_ic3d, idlist, filelist, varlist, oce_init3d, & ! to be read from the namelist + n_ic3d, idlist, filelist, varlist, tracer_init3d, & ! to be read from the namelist t_insitu private @@ -39,7 +41,7 @@ MODULE g_ic3d character(MAX_PATH), save, dimension(ic_max) :: filelist character(50), save, dimension(ic_max) :: varlist - namelist / oce_init3d / n_ic3d, idlist, filelist, varlist, t_insitu + namelist / tracer_init3d / n_ic3d, idlist, filelist, varlist, t_insitu character(MAX_PATH), save :: filename character(50), save :: varname @@ -63,33 +65,33 @@ MODULE g_ic3d !============== NETCDF ========================================== CONTAINS - SUBROUTINE nc_readGrid + SUBROUTINE nc_readGrid(partit) ! Read time array and grid from nc file IMPLICIT NONE - - integer :: iost !I/O status - integer :: ncid ! netcdf file id - integer :: i + type(t_partit), intent(inout) :: partit + integer :: iost !I/O status + integer :: ncid ! netcdf file id + integer :: i ! ID dimensions and variables: - integer :: id_lon - integer :: id_lat - integer :: id_lond - integer :: id_latd - integer :: id_depth - integer :: id_depthd - integer :: nf_start(4) - integer :: nf_edges(4) - integer :: ierror ! return error code + integer :: id_lon + integer :: id_lat + integer :: id_lond + integer :: id_latd + integer :: id_depth + integer :: id_depthd + integer :: nf_start(4) + integer :: nf_edges(4) + integer :: ierror ! 
return error code !open file - if (mype==0) then + if (partit%mype==0) then iost = nf_open(trim(filename),NF_NOWRITE,ncid) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) ! get dimensions - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "LAT", id_latd) if (iost .ne. NF_NOERR) then iost = nf_inq_dimid(ncid, "lat", id_latd) @@ -98,9 +100,9 @@ SUBROUTINE nc_readGrid iost = nf_inq_dimid(ncid, "latitude", id_latd) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "LON", id_lond) if (iost .ne. NF_NOERR) then iost = nf_inq_dimid(ncid, "longitude", id_lond) @@ -109,18 +111,18 @@ SUBROUTINE nc_readGrid iost = nf_inq_dimid(ncid, "lon", id_lond) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "depth", id_depthd) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) ! get variable id - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_varid(ncid, "LAT", id_lat) if (iost .ne. 
NF_NOERR) then iost = nf_inq_varid(ncid, "lat", id_lat) @@ -129,7 +131,7 @@ SUBROUTINE nc_readGrid iost = nf_inq_varid(ncid, "latitude", id_lat) end if end if - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_varid(ncid, "LON", id_lon) if (iost .ne. NF_NOERR) then iost = nf_inq_varid(ncid, "longitude", id_lon) @@ -138,75 +140,75 @@ SUBROUTINE nc_readGrid iost = nf_inq_varid(ncid, "lon", id_lon) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_varid(ncid, "depth", id_depth) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) ! get dimensions size - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_latd, nc_Nlat) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_lond, nc_Nlon) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_depthd, nc_Ndepth) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) nc_Nlon=nc_Nlon+2 !for the halo in case of periodic boundary - call 
MPI_BCast(nc_Nlon, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(nc_Nlat, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(nc_Ndepth, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_Nlon, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_Nlat, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_Ndepth, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) ALLOCATE( nc_lon(nc_Nlon), nc_lat(nc_Nlat),& & nc_depth(nc_Ndepth)) !read variables from file ! coordinates - if (mype==0) then + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=nc_Nlat iost = nf_get_vara_double(ncid, id_lat, nf_start, nf_edges, nc_lat) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=nc_Nlon-2 iost = nf_get_vara_double(ncid, id_lon, nf_start, nf_edges, nc_lon(2:nc_Nlon-1)) nc_lon(1) =nc_lon(nc_Nlon-1) nc_lon(nc_Nlon) =nc_lon(2) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) ! depth - if (mype==0) then + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=nc_Ndepth iost = nf_get_vara_double(ncid, id_depth, nf_start, nf_edges,nc_depth) if (nc_depth(2) < 0.) 
nc_depth=-nc_depth end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) - call MPI_BCast(nc_lon, nc_Nlon, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(nc_lat, nc_Nlat, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(nc_depth, nc_Ndepth, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_lon, nc_Nlon, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_lat, nc_Nlat, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(nc_depth, nc_Ndepth, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) - if (mype==0) then + if (partit%mype==0) then iost = nf_close(ncid) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,filename,partit) if (ic_cyclic) then nc_lon(1) =nc_lon(1)-360. @@ -215,28 +217,30 @@ SUBROUTINE nc_readGrid END SUBROUTINE nc_readGrid - SUBROUTINE nc_ic3d_ini(mesh) + SUBROUTINE nc_ic3d_ini(partit, mesh) !!--------------------------------------------------------------------- !! ** Purpose : inizialization of ocean forcing from NETCDF file !!---------------------------------------------------------------------- IMPLICIT NONE - - integer :: i - integer :: elnodes(3) - real(wp) :: x, y ! coordinates of elements + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: i + integer :: elnodes(3) + real(wp) :: x, y ! 
coordinates of elements real(kind=WP), allocatable,dimension(:,:) :: cav_nrst_xyz - type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" warn = 0 if (mype==0) then - write(*,*) 'reading input tracer file for tracer ID= ', tracer_ID(current_tracer) - write(*,*) 'input file: ', trim(filename) + write(*,*) 'reading ', trim(filename) write(*,*) 'variable : ', trim(varname) end if - call nc_readGrid + call nc_readGrid(partit) ! prepare nearest coordinates in INfile , save to bilin_indx_i/j !_________________________________________________________________________ @@ -296,7 +300,7 @@ SUBROUTINE nc_ic3d_ini(mesh) end if END SUBROUTINE nc_ic3d_ini - SUBROUTINE getcoeffld(mesh) + SUBROUTINE getcoeffld(tracers, partit, mesh) !!--------------------------------------------------------------------- !! *** ROUTINE getcoeffld *** !! @@ -304,44 +308,56 @@ SUBROUTINE getcoeffld(mesh) !! ** Method : !! ** Action : !!---------------------------------------------------------------------- + + USE ieee_arithmetic IMPLICIT NONE - - integer :: iost !I/O status - integer :: ncid ! netcdf file id + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer :: iost !I/O status + integer :: ncid ! netcdf file id ! ID dimensions and variables: - integer :: id_data - integer :: nf_start(4) - integer :: nf_edges(4) - integer :: fld_idx, i,j,ii, ip1, jp1, k - integer :: d_indx, d_indx_p1 ! index of neares - real(wp) :: cf_a, cf_b, delta_d - integer :: nl1, ul1 - real(wp) :: denom, x1, x2, y1, y2, x, y, d1,d2, aux_z - - real(wp), allocatable, dimension(:,:,:) :: ncdata - real(wp), allocatable, dimension(:) :: data1d - integer :: elnodes(3) - integer :: ierror ! 
return error code - - type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" + integer :: id_data + integer :: nf_start(4) + integer :: nf_edges(4) + integer :: fld_idx, i,j,ii, ip1, jp1, k + integer :: d_indx, d_indx_p1 ! index of neares + real(wp) :: cf_a, cf_b, delta_d + integer :: nl1, ul1 + real(wp) :: denom, x1, x2, y1, y2, x, y, d1,d2, aux_z + real(wp), allocatable, dimension(:,:,:) :: ncdata + real(wp), allocatable, dimension(:) :: data1d + integer :: elnodes(3) + integer :: ierror ! return error code + integer :: NO_FILL ! 0=no fillval, 1=fillval + real(wp) :: FILL_VALUE +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ALLOCATE(ncdata(nc_Nlon,nc_Nlat,nc_Ndepth), data1d(nc_Ndepth)) ncdata=0.0_WP data1d=0.0_WP - tr_arr(:,:,current_tracer)=dummy + tracers%data(current_tracer)%values(:,:)=dummy !open NETCDF file on 0 core if (mype==0) then iost = nf_open(filename,NF_NOWRITE,ncid) end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call check_nferr(iost,filename,partit) ! get variable id if (mype==0) then iost = nf_inq_varid(ncid, varname, id_data) + iost = nf_inq_var_fill(ncid, id_data, NO_FILL, FILL_VALUE) ! FillValue defined? + if (NO_FILL==1) then + print *, 'No _FillValue is set in ', trim(filename), ', trying dummy =', dummy, FILL_VALUE + else + print *, 'The FillValue in ', trim(filename), ' is set to ', FILL_VALUE ! should set dummy accordingly + end if end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call check_nferr(iost,filename,partit) !read data from file if (mype==0) then nf_start(1)=1 @@ -353,14 +369,24 @@ SUBROUTINE getcoeffld(mesh) iost = nf_get_vara_double(ncid, id_data, nf_start, nf_edges, ncdata(2:nc_Nlon-1,:,:)) ncdata(1,:,:) =ncdata(nc_Nlon-1,:,:) ncdata(nc_Nlon,:,:)=ncdata(2,:,:) - where (ncdata < -0.99_WP*dummy ) ! 
dummy values are only positive - ncdata = dummy - end where + + ! replace nan (or fillvalue) by dummy value + do k=1,nc_Ndepth + do j=1,nc_Nlat + do i=1,nc_Nlon + if (ieee_is_nan(ncdata(i,j,k)) .or. (ncdata(i,j,k)==FILL_VALUE)) then + ncdata(i,j,k) = dummy + elseif (ncdata(i,j,k) < -0.99_WP*dummy .or. ncdata(i,j,k) > dummy) then + ! and in case the input data has other conventions on missing values: + ncdata(i,j,k) = dummy + endif + end do + end do + end do end if call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,filename) + call check_nferr(iost,filename,partit) call MPI_BCast(ncdata, nc_Nlon*nc_Nlat*nc_Ndepth, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - ! bilinear space interpolation, ! data is assumed to be sampled on a regular grid do ii = 1, myDim_nod2d @@ -370,8 +396,6 @@ SUBROUTINE getcoeffld(mesh) j = bilin_indx_j(ii) ip1 = i + 1 jp1 = j + 1 -!!PS x = geo_coord_nod2D(1,ii)/rad -!!PS y = geo_coord_nod2D(2,ii)/rad !______________________________________________________________________ ! its a cavity node use extrapolation points of closest cavity line point ! exchange the coordinates of the cavity node with the coordinates of the @@ -426,11 +450,11 @@ SUBROUTINE getcoeffld(mesh) cf_a = (d2 - d1)/ delta_d ! value of interpolated OB data on Z from model cf_b = d1 - cf_a * nc_depth(d_indx) - !!PS tr_arr(k,ii,current_tracer) = -cf_a * Z_3d_n(k,ii) + cf_b - tr_arr(k,ii,current_tracer) = -cf_a * aux_z + cf_b + !!PS tracers%data(current_tracer)%values(k,ii) = -cf_a * Z_3d_n(k,ii) + cf_b + tracers%data(current_tracer)%values(k,ii) = -cf_a * aux_z + cf_b end if elseif (d_indx==0) then - tr_arr(k,ii,current_tracer)=data1d(1) + tracers%data(current_tracer)%values(k,ii)=data1d(1) end if enddo !___________________________________________________________________ @@ -450,10 +474,10 @@ SUBROUTINE getcoeffld(mesh) cf_a = (d2 - d1)/ delta_d ! 
value of interpolated OB data on Z from model cf_b = d1 - cf_a * nc_depth(d_indx) - tr_arr(k,ii,current_tracer) = -cf_a * Z_3d_n(k,ii) + cf_b + tracers%data(current_tracer)%values(k,ii) = -cf_a * Z_3d_n(k,ii) + cf_b end if elseif (d_indx==0) then - tr_arr(k,ii,current_tracer)=data1d(1) + tracers%data(current_tracer)%values(k,ii)=data1d(1) end if enddo end if ! --> if (use_cavity) then @@ -466,136 +490,97 @@ SUBROUTINE getcoeffld(mesh) DEALLOCATE( ncdata, data1d ) END SUBROUTINE getcoeffld - SUBROUTINE do_ic3d(mesh) + SUBROUTINE do_ic3d(tracers, partit, mesh) !!--------------------------------------------------------------------- !! *** ROUTINE do_ic3d *** !! !! ** Purpose : read 3D initial conditions for tracers from netcdf and interpolate on model grid !!---------------------------------------------------------------------- + USE insitu2pot_interface IMPLICIT NONE - integer :: n, i - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: locTmax, locTmin, locSmax, locSmin, glo + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer :: n, i + real(kind=WP) :: locTmax, locTmin, locSmax, locSmin, glo - if (mype==0) write(*,*) "Start: Initial conditions for tracers" + if (partit%mype==0) write(*,*) "Start: Initial conditions for tracers" - ALLOCATE(bilin_indx_i(myDim_nod2d+eDim_nod2D), bilin_indx_j(myDim_nod2d+eDim_nod2D)) + ALLOCATE(bilin_indx_i(partit%myDim_nod2d+partit%eDim_nod2D), bilin_indx_j(partit%myDim_nod2d+partit%eDim_nod2D)) DO n=1, n_ic3d filename=trim(ClimateDataPath)//trim(filelist(n)) varname =trim(varlist(n)) - DO current_tracer=1, num_tracers - if (tracer_ID(current_tracer)==idlist(n)) then + DO current_tracer=1, tracers%num_tracers + if (tracers%data(current_tracer)%ID==idlist(n)) then ! read initial conditions for current tracer - call nc_ic3d_ini(mesh) + call nc_ic3d_ini(partit, mesh) ! 
get first coeficients for time inerpolation on model grid for all datas - call getcoeffld(mesh) + call getcoeffld(tracers, partit, mesh) call nc_end ! deallocate arrqays associated with netcdf file - call extrap_nod(tr_arr(:,:,current_tracer), mesh) + call extrap_nod(tracers%data(current_tracer)%values(:,:), partit, mesh) exit - elseif (current_tracer==num_tracers) then - if (mype==0) write(*,*) "idlist contains tracer which is not listed in tracer_id!" - if (mype==0) write(*,*) "check your namelists!" - call par_ex + elseif (current_tracer==tracers%num_tracers) then + if (partit%mype==0) write(*,*) "idlist contains tracer which is not listed in tracer_id!" + if (partit%mype==0) write(*,*) "check your namelists!" + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if END DO END DO DEALLOCATE(bilin_indx_i, bilin_indx_j) - !_________________________________________________________________________ - ! set remaining dummy values from bottom topography to 0.0_WP - where (tr_arr > 0.9_WP*dummy) - tr_arr=0.0_WP - end where - + do current_tracer=1, tracers%num_tracers + !_________________________________________________________________________ + ! set remaining dummy values from bottom topography to 0.0_WP + where (tracers%data(current_tracer)%values > 0.9_WP*dummy) + tracers%data(current_tracer)%values=0.0_WP + end where + + !_________________________________________________________________________ + ! eliminate values within cavity that result from the extrapolation of + ! initialisation + do n=1,partit%myDim_nod2d + partit%eDim_nod2D + ! ensure cavity is zero + if (use_cavity) tracers%data(current_tracer)%values(1:mesh%ulevels_nod2D(n)-1,n)=0.0_WP + ! ensure bottom is zero + tracers%data(current_tracer)%values(mesh%nlevels_nod2D(n):mesh%nl-1,n)=0.0_WP + end do + end do !_________________________________________________________________________ ! 
convert temperature from Kelvin --> °C - where (tr_arr(:,:,1) > 100._WP) - tr_arr(:,:,1)=tr_arr(:,:,1)-273.15_WP + where (tracers%data(1)%values(:,:) > 100._WP) + tracers%data(1)%values(:,:) = tracers%data(1)%values(:,:)-273.15_WP end where - !_________________________________________________________________________ - ! eliminate values within cavity that result from the extrapolation of - ! initialisation - do n=1,myDim_nod2d + eDim_nod2D - ! ensure cavity is zero - if (use_cavity) tr_arr(1:mesh%ulevels_nod2D(n)-1,n,:)=0.0_WP - ! ensure bottom is zero - tr_arr(mesh%nlevels_nod2D(n):mesh%nl-1,n,:)=0.0_WP - end do - !_________________________________________________________________________ if (t_insitu) then - if (mype==0) write(*,*) "converting insitu temperature to potential..." - call insitu2pot(mesh) + if (partit%mype==0) write(*,*) "converting insitu temperature to potential..." + call insitu2pot(tracers, partit, mesh) end if - if (mype==0) write(*,*) "DONE: Initial conditions for tracers" - - !_________________________________________________________________________ - ! Homogenous temp salt initialisation --> for testing and debuging -!!PS do n=1,myDim_nod2d + eDim_nod2D -!!PS tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,1) = 16.0 -!!PS tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,2) = 35.0 -!!PS end do - + if (partit%mype==0) write(*,*) "DONE: Initial conditions for tracers" !_________________________________________________________________________ ! 
check initial fields locTmax = -6666 locTmin = 6666 locSmax = locTmax locSmin = locTmin - do n=1,myDim_nod2d -!!PS if (any( tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,2)>0.99_WP*dummy)) then -!!PS write(*,*) '____________________________________________________________' -!!PS write(*,*) ' --> check init fields SALT >0.99_WP*dummy' -!!PS write(*,*) 'mype =',mype -!!PS write(*,*) 'n =',n -!!PS write(*,*) 'lon,lat =',mesh%geo_coord_nod2D(:,n)/rad -!!PS write(*,*) 'mesh%ulevels_nod2D(n) =',mesh%ulevels_nod2D(n) -!!PS write(*,*) 'mesh%nlevels_nod2D(n) =',mesh%nlevels_nod2D(n) -!!PS write(*,*) 'tr_arr(unl:lnl,n,2) =',tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,2) -!!PS write(*,*) 'tr_arr( 1:lnl,n,2) =',tr_arr(1:mesh%nlevels_nod2D(n)-1,n,2) -!!PS end if -!!PS if (any( tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,1)>0.99_WP*dummy)) then -!!PS write(*,*) '____________________________________________________________' -!!PS write(*,*) ' --> check init fields TEMP >0.99_WP*dummy' -!!PS write(*,*) 'mype =',mype -!!PS write(*,*) 'n =',n -!!PS write(*,*) 'lon,lat =',mesh%geo_coord_nod2D(:,n)/rad -!!PS write(*,*) 'mesh%ulevels_nod2D(n) =',mesh%ulevels_nod2D(n) -!!PS write(*,*) 'mesh%nlevels_nod2D(n) =',mesh%nlevels_nod2D(n) -!!PS write(*,*) 'tr_arr(:,n,1) =',tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,1) -!!PS end if - locTmax = max(locTmax,maxval(tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,1)) ) - locTmin = min(locTmin,minval(tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,1)) ) - locSmax = max(locSmax,maxval(tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,2)) ) - locSmin = min(locSmin,minval(tr_arr(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n,2)) ) + do n=1, partit%myDim_nod2d + locTmax = max(locTmax,maxval(tracers%data(1)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) + locTmin = min(locTmin,minval(tracers%data(1)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) + locSmax = 
max(locSmax,maxval(tracers%data(2)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) + locSmin = min(locSmin,minval(tracers%data(2)%values(mesh%ulevels_nod2D(n):mesh%nlevels_nod2D(n)-1,n)) ) end do - call MPI_AllREDUCE(locTmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - if (mype==0) write(*,*) ' |-> gobal max init. temp. =', glo - call MPI_AllREDUCE(locTmin , glo , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - if (mype==0) write(*,*) ' |-> gobal min init. temp. =', glo - call MPI_AllREDUCE(locSmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - if (mype==0) write(*,*) ' |-> gobal max init. salt. =', glo - call MPI_AllREDUCE(locSmin , glo , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - if (mype==0) write(*,*) ' `-> gobal min init. salt. =', glo - + call MPI_AllREDUCE(locTmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, partit%MPI_COMM_FESOM, partit%MPIerr) + if (partit%mype==0) write(*,*) ' |-> gobal max init. temp. =', glo + call MPI_AllREDUCE(locTmin , glo , 1, MPI_DOUBLE_PRECISION, MPI_MIN, partit%MPI_COMM_FESOM, partit%MPIerr) + if (partit%mype==0) write(*,*) ' |-> gobal min init. temp. =', glo + call MPI_AllREDUCE(locSmax , glo , 1, MPI_DOUBLE_PRECISION, MPI_MAX, partit%MPI_COMM_FESOM, partit%MPIerr) + if (partit%mype==0) write(*,*) ' |-> gobal max init. salt. =', glo + call MPI_AllREDUCE(locSmin , glo , 1, MPI_DOUBLE_PRECISION, MPI_MIN, partit%MPI_COMM_FESOM, partit%MPIerr) + if (partit%mype==0) write(*,*) ' `-> gobal min init. salt. =', glo END SUBROUTINE do_ic3d - - SUBROUTINE err_call(iost,fname) - !!--------------------------------------------------------------------- - !! 
*** ROUTINE err_call *** - !!---------------------------------------------------------------------- - IMPLICIT NONE - integer, intent(in) :: iost - character(len=MAX_PATH), intent(in) :: fname - write(*,*) 'ERROR: I/O status=',iost,' file= ',fname - call par_ex - stop - END SUBROUTINE err_call - - + SUBROUTINE nc_end IMPLICIT NONE @@ -604,13 +589,14 @@ SUBROUTINE nc_end END SUBROUTINE nc_end - SUBROUTINE check_nferr(iost,fname) + SUBROUTINE check_nferr(iost,fname, partit) IMPLICIT NONE - character(len=MAX_PATH), intent(in) :: fname - integer, intent(in) :: iost + type(t_partit), intent(inout) :: partit + character(len=MAX_PATH), intent(in) :: fname + integer, intent(in) :: iost if (iost .ne. NF_NOERR) then write(*,*) 'ERROR: I/O status= "',trim(nf_strerror(iost)),'";',iost,' file= ', trim(fname) - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif END SUBROUTINE diff --git a/src/gen_interpolation.F90 b/src/gen_interpolation.F90 index 4d877a0c3..e2adbd26e 100755 --- a/src/gen_interpolation.F90 +++ b/src/gen_interpolation.F90 @@ -1,7 +1,7 @@ ! routines doing 3D, 2D and 1D interpolation subroutine interp_2d_field_v2(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, missvalue, & - num_mod, lon_mod, lat_mod, data_mod) + num_mod, lon_mod, lat_mod, data_mod, partit) !------------------------------------------------------------------------------------- ! A second version of 2D interpolation. ! This routine does 2d interpolation from a regular grid to specified nodes @@ -29,12 +29,14 @@ subroutine interp_2d_field_v2(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_r ! Coded by Qiang Wang ! Reviewed by ?? 
!------------------------------------------------------------------------------------- - use g_PARSUP, only: par_ex + USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM, only: WP implicit none integer :: n, i, ii, jj, k, nod_find integer :: ind_lat_h, ind_lat_l, ind_lon_h, ind_lon_l - integer, intent(in) :: num_lon_reg, num_lat_reg, num_mod + integer, intent(in) :: num_lon_reg, num_lat_reg, num_mod + type(t_partit), intent(inout) :: partit real(kind=WP) :: x, y, diff, d, dmin real(kind=WP) :: rt_lat1, rt_lat2, rt_lon1, rt_lon2 real(kind=WP) :: data(2,2) @@ -47,7 +49,7 @@ subroutine interp_2d_field_v2(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_r if(lon_reg(1)<0.0 .or. lon_reg(num_lon_reg)>360.) then write(*,*) 'Error in 2D interpolation!' write(*,*) 'The regular grid is not in the proper longitude range.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) stop end if @@ -135,7 +137,7 @@ end subroutine interp_2d_field_v2 !--------------------------------------------------------------------------- ! subroutine interp_2d_field(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, & - num_mod, lon_mod, lat_mod, data_mod, phase_flag) + num_mod, lon_mod, lat_mod, data_mod, phase_flag, partit) !------------------------------------------------------------------------------------- ! This routine does 2d interpolation from a regular grid to specified nodes ! on the surface grid. The regular grid is assumed to be global. @@ -162,17 +164,19 @@ subroutine interp_2d_field(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, ! Coded by Qiang Wang ! Reviewed by ?? 
!------------------------------------------------------------------------------------- - use g_PARSUP, only: par_ex + USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM, only: WP implicit none integer :: n, i integer :: ind_lat_h, ind_lat_l, ind_lon_h, ind_lon_l integer, intent(in) :: num_lon_reg, num_lat_reg, num_mod integer, intent(in) :: phase_flag + type(t_partit), intent(inout) :: partit real(kind=WP) :: x, y, diff real(kind=WP) :: rt_lat1, rt_lat2, rt_lon1, rt_lon2 - real(kind=WP) :: data_ll, data_lh, data_hl, data_hh - real(kind=WP) :: data_lo, data_up + real(kind=WP) :: data_ll, data_lh, data_hl, data_hh + real(kind=WP) :: data_lo, data_up real(kind=WP), intent(in) :: lon_reg(num_lon_reg), lat_reg(num_lat_reg) real(kind=WP), intent(in) :: data_reg(num_lon_reg, num_lat_reg) real(kind=WP), intent(in) :: lon_mod(num_mod), lat_mod(num_mod) @@ -181,7 +185,7 @@ subroutine interp_2d_field(num_lon_reg, num_lat_reg, lon_reg, lat_reg, data_reg, if(lon_reg(1)<0.0_WP .or. lon_reg(num_lon_reg)>360._WP) then write(*,*) 'Error in 2D interpolation!' write(*,*) 'The regular grid is not in the proper longitude range.' - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if @@ -281,7 +285,7 @@ end subroutine interp_2d_field ! subroutine interp_3d_field(num_lon_reg, num_lat_reg, num_lay_reg, & lon_reg, lat_reg, lay_reg, data_reg, & - num_mod_z, num_mod, lon_mod, lat_mod, lay_mod, data_mod, mesh) + num_mod_z, num_mod, lon_mod, lat_mod, lay_mod, data_mod, partit, mesh) !------------------------------------------------------------------------------------- ! This routine does 3d interpolation from a regular grid to specified nodes. ! The regular grid is assumed to be global. @@ -311,8 +315,9 @@ subroutine interp_3d_field(num_lon_reg, num_lat_reg, num_lay_reg, & ! Reviewed by ?? 
!------------------------------------------------------------------------------------- use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_param, only: WP - use g_parsup implicit none integer :: n, i, flag,nz integer :: ind_lat_h, ind_lat_l, ind_lon_h, ind_lon_l @@ -330,9 +335,12 @@ subroutine interp_3d_field(num_lon_reg, num_lat_reg, num_lay_reg, & real(kind=WP), intent(in) :: data_reg(num_lon_reg, num_lat_reg, num_lay_reg) real(kind=WP), intent(in) :: lon_mod(num_mod), lat_mod(num_mod), lay_mod(num_mod) real(kind=WP), intent(out) :: data_mod(num_mod_z,num_mod) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" do n=1,num_mod !!PS do nz=1,nlevels_nod2D(n)-1 diff --git a/src/gen_model_setup.F90 b/src/gen_model_setup.F90 index 755b6abf7..4f78d5610 100755 --- a/src/gen_model_setup.F90 +++ b/src/gen_model_setup.F90 @@ -1,43 +1,35 @@ ! ============================================================== -subroutine setup_model - implicit none - call read_namelist ! should be before clock_init -end subroutine setup_model -! ============================================================== -subroutine read_namelist - ! Reads namelist files and overwrites default parameters. - ! - ! Coded by Lars Nerger - ! Modified by Qiang Wang, SD - !-------------------------------------------------------------- +subroutine setup_model(partit) + USE MOD_PARTIT + USE MOD_PARSUP use o_param - use i_param - use i_therm_param +! 
use i_therm_param use g_forcing_param - use g_parsup use g_config use diagnostics, only: ldiag_solver,lcurt_stress_surf,lcurt_stress_surf, ldiag_energy, & ldiag_dMOC, ldiag_DVD, diag_list - use g_clock, only: timenew, daynew, yearnew + use g_clock, only: timenew, daynew, yearnew use g_ic3d implicit none + type(t_partit), intent(inout), target :: partit + character(len=MAX_PATH) :: nmlfile + integer fileunit - character(len=MAX_PATH) :: nmlfile namelist /clockinit/ timenew, daynew, yearnew nmlfile ='namelist.config' ! name of general configuration namelist file - open (20,file=nmlfile) - read (20,NML=modelname) - read (20,NML=timestep) - read (20,NML=clockinit) - read (20,NML=paths) - read (20,NML=restart_log) - read (20,NML=ale_def) - read (20,NML=geometry) - read (20,NML=calendar) - read (20,NML=run_config) -!!$ read (20,NML=machine) - close (20) + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=modelname) + read (fileunit, NML=timestep) + read (fileunit, NML=clockinit) + read (fileunit, NML=paths) + read (fileunit, NML=restart_log) + read (fileunit, NML=ale_def) + read (fileunit, NML=geometry) + read (fileunit, NML=calendar) + read (fileunit, NML=run_config) +!!$ read (fileunit, NML=machine) + close (fileunit) ! ========== ! compute dt ! ========== @@ -54,37 +46,40 @@ subroutine read_namelist ! ================================= nmlfile ='namelist.oce' ! name of ocean namelist file - open (20,file=nmlfile) - read (20,NML=oce_dyn) - read (20,NML=oce_tra) - read (20,NML=oce_init3d) - close (20) + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=oce_dyn) + close (fileunit) + + nmlfile ='namelist.tra' ! name of ocean namelist file + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=tracer_phys) + close (fileunit) nmlfile ='namelist.forcing' ! 
name of forcing namelist file - open (20,file=nmlfile) - read (20,NML=forcing_exchange_coeff) - read (20,NML=forcing_bulk) - read (20,NML=land_ice) - close (20) + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=forcing_exchange_coeff) + read (fileunit, NML=forcing_bulk) + read (fileunit, NML=land_ice) + close (fileunit) - if(use_ice) then - nmlfile ='namelist.ice' ! name of ice namelist file - open (20,file=nmlfile) - read (20,NML=ice_dyn) - read (20,NML=ice_therm) - close (20) - endif +! if(use_ice) then +! nmlfile ='namelist.ice' ! name of ice namelist file +! open (newunit=fileunit, file=nmlfile) +! ! read (fileunit, NML=ice_dyn) +! read (fileunit, NML=ice_therm) +! close (fileunit) +! endif nmlfile ='namelist.io' ! name of forcing namelist file - open (20,file=nmlfile) - read (20,NML=diag_list) - close (20) + open (newunit=fileunit, file=nmlfile) + read (fileunit, NML=diag_list) + close (fileunit) - if(mype==0) write(*,*) 'Namelist files are read in' + if(partit%mype==0) write(*,*) 'Namelist files are read in' !_____________________________________________________________________________ ! Check for namelist parameter consistency - if(mype==0) then + if(partit%mype==0) then ! check for valid step per day number if (mod(86400,step_per_day)==0) then @@ -108,25 +103,26 @@ subroutine read_namelist write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) endif endif - ! if ((output_length_unit=='s').or.(int(real(step_per_day)/24.0)<=1)) use_means=.false. -end subroutine read_namelist +end subroutine setup_model ! ================================================================= -subroutine get_run_steps(nsteps) +subroutine get_run_steps(nsteps, partit) ! Coded by Qiang Wang ! Reviewed by ?? 
- !-------------------------------------------------------------- - + !-------------------------------------------------------------- use g_clock - use g_parsup + USE MOD_PARTIT + USE MOD_PARSUP implicit none - integer :: i, temp_year, temp_mon, temp_fleapyear, nsteps + type(t_partit), intent(inout) :: partit + integer, intent(inout) :: nsteps + integer :: i, temp_year, temp_mon, temp_fleapyear ! clock should have been inialized before calling this routine @@ -158,11 +154,11 @@ subroutine get_run_steps(nsteps) else write(*,*) 'Run length unit ', run_length_unit, ' is not defined.' write(*,*) 'Please check and update the code.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) stop end if - if(mype==0) write(*,*) nsteps, ' steps to run for ', runid, ' job submission' + if(partit%mype==0) write(*,*) nsteps, ' steps to run for ', runid, ' job submission' end subroutine get_run_steps diff --git a/src/gen_modules_backscatter.F90 b/src/gen_modules_backscatter.F90 new file mode 100644 index 000000000..1a3c620cd --- /dev/null +++ b/src/gen_modules_backscatter.F90 @@ -0,0 +1,411 @@ +module g_backscatter + + !___________________________________________________________________________ + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + + !___________________________________________________________________________ + USE o_ARRAYS, only: bvfreq + + !___________________________________________________________________________ + USE o_param + USE g_CONFIG + USE g_comm_auto + USE g_support + USE g_rotate_grid + IMPLICIT NONE + + !___________________________________________________________________________ + ! 
allocate backscatter arrays + real(kind=WP), allocatable, dimension(:,:) :: v_back + real(kind=WP), allocatable, dimension(:,:) :: uke, uke_back, uke_dis, uke_dif + real(kind=WP), allocatable, dimension(:,:) :: uke_rhs, uke_rhs_old + real(kind=WP), allocatable, dimension(:,:) :: UV_dis_posdef_b2, UV_dis_posdef, UV_back_posdef + real(kind=WP), allocatable, dimension(:,:,:):: UV_back, UV_dis + real(kind=WP), allocatable, dimension(:,:,:):: UV_dis_tend, UV_total_tend, UV_back_tend + + contains + ! + ! + !___________________________________________________________________________ + ! allocate/initialise backscatter arrays + subroutine init_backscatter(partit, mesh) + implicit none + integer :: elem_size + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + elem_size = myDim_elem2D + eDim_elem2D + allocate(v_back( nl-1, elem_size)) ! Backscatter viscosity + allocate(uke( nl-1, elem_size)) ! Unresolved kinetic energy for backscatter coefficient + allocate(uke_dis( nl-1, elem_size)) + allocate(uke_back( nl-1, elem_size)) + allocate(uke_dif( nl-1, elem_size)) + allocate(uke_rhs( nl-1, elem_size)) + allocate(uke_rhs_old( nl-1, elem_size)) + allocate(UV_dis( 2, nl-1, elem_size)) + allocate(UV_back( 2, nl-1, elem_size)) + allocate(UV_dis_tend( 2, nl-1, elem_size)) + allocate(UV_back_tend( 2, nl-1, elem_size)) + allocate(UV_total_tend(2, nl-1, elem_size)) + uke = 0.0_WP + v_back = 0.0_WP + uke_dis = 0.0_WP + uke_dif = 0.0_WP + uke_back = 0.0_WP + uke_rhs = 0.0_WP + uke_rhs_old = 0.0_WP + UV_dis = 0.0_WP + UV_dis_tend = 0.0_WP + UV_back = 0.0_WP + UV_back_tend = 0.0_WP + UV_total_tend = 0.0_WP + + end subroutine init_backscatter + + ! + ! 
+ !_______________________________________________________________________________ + subroutine visc_filt_dbcksc(dynamics, partit, mesh) + IMPLICIT NONE + + real(kind=WP) :: u1, v1, le(2), len, crosslen, vi, uke1 + integer :: nz, ed, el(2) + real(kind=WP) , allocatable :: uke_d(:,:) + !!PS real(kind=WP) , allocatable :: UV_back(:,:,:), UV_dis(:,:,:) + real(kind=WP) , allocatable :: uuu(:) + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP) , dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP) , dimension(:,:) , pointer :: U_c, V_c +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) + + ! An analog of harmonic viscosity operator. + ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area + ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. + ! The contribution from boundary edges is neglected (free slip). + ! Filter is applied twice. + ed=myDim_elem2D+eDim_elem2D + !!PS allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) + allocate(uke_d(nl-1,ed)) + allocate(uuu(ed)) + UV_back= 0.0_WP + UV_dis = 0.0_WP + uke_d = 0.0_WP + U_c = 0.0_WP + V_c = 0.0_WP + + DO ed=1, myDim_edge2D+eDim_edge2D + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + DO nz=1,minval(nlevels(el))-1 + u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) + v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) + + U_c(nz,el(1))=U_c(nz,el(1))-u1 + U_c(nz,el(2))=U_c(nz,el(2))+u1 + V_c(nz,el(1))=V_c(nz,el(1))-v1 + V_c(nz,el(2))=V_c(nz,el(2))+v1 + END DO + END DO + + Do ed=1,myDim_elem2D + len=sqrt(elem_area(ed)) + len=dt*len/30.0_WP + Do nz=1,nlevels(ed)-1 + ! vi has the sense of harmonic viscosity coefficient because of + ! the division by area in the end + ! ==== + ! 
Case 1 -- an analog to the third-order upwind (vi=|u|l/12) + ! ==== + vi=max(0.2_WP,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len + U_c(nz,ed)=-U_c(nz,ed)*vi + V_c(nz,ed)=-V_c(nz,ed)*vi + END DO + end do + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) + + DO ed=1, myDim_edge2D+eDim_edge2D + if(myList_edge2D(ed)>edge2D_in) cycle + el=edge_tri(:,ed) + le=edge_dxdy(:,ed) + le(1)=le(1)*sum(elem_cos(el))*0.25_WP + len=sqrt(le(1)**2+le(2)**2)*r_earth + le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) + le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) + crosslen=sqrt(le(1)**2+le(2)**2) + + DO nz=1,minval(nlevels(el))-1 + vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen + !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed + !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed + !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where + ! Visc is small and decoupling might happen + !Backscatter contribution + u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi + v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi + + !UKE diffusion + vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen + uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi + + UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) + UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) + UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) + UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) + + !Correct scaling for the diffusion? 
+ uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) + uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) + + !Biharmonic contribution + u1=(U_c(nz,el(1))-U_c(nz,el(2))) + v1=(V_c(nz,el(1))-V_c(nz,el(2))) + + UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) + UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) + UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) + UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) + + END DO + END DO + call exchange_elem(UV_back, partit) + + DO nz=1, nl-1 + uuu=0.0_WP + uuu=UV_back(1,nz,:) + call smooth_elem(uuu,smooth_back_tend, partit, mesh) + UV_back(1,nz,:)=uuu + uuu=0.0_WP + uuu=UV_back(2,nz,:) + call smooth_elem(uuu,smooth_back_tend, partit, mesh) + UV_back(2,nz,:)=uuu + END DO + + DO ed=1, myDim_elem2D + DO nz=1,nlevels(ed)-1 + UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) + UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) + END DO + END DO + + UV_dis_tend=UV_dis!+UV_back + UV_total_tend=UV_dis+UV_back + UV_back_tend=UV_back + uke_dif=uke_d + + call uke_update(dynamics, partit, mesh) + + !!PS deallocate(UV_dis,UV_back) + deallocate(uke_d) + deallocate(uuu) + end subroutine visc_filt_dbcksc + + ! + ! + !_______________________________________________________________________________ + subroutine backscatter_coef(partit, mesh) + IMPLICIT NONE + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, nz +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !Potentially add the Rossby number scaling to the script... + !check if sign is right! 
Different in the Jansen paper + !Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) + v_back=0.0_WP + DO elem=1, myDim_elem2D + DO nz=1,nlevels(elem)-1 + !v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) + !v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct + v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct + !Scaling by sqrt(2*elem_area) or sqrt(elem_area)? + END DO + END DO + call exchange_elem(v_back, partit) + end subroutine backscatter_coef + ! + ! + !_______________________________________________________________________________ + subroutine uke_update(dynamics, partit, mesh) + IMPLICIT NONE + + !I had to change uke(:) to uke(:,:) to make output and restart work!! + !Why is it necessary to implement the length of the array? It doesn't work without! + !integer, intent(in) :: t_levels + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + + real(kind=WP) :: hall, h1_eta, hnz, vol + integer :: elnodes(3), nz, ed, edi, node, j, elem, q + real(kind=WP), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) + integer :: kk, nzmax, el + real(kind=WP) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso + real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? 
+ real(kind=WP), dimension(:,:,:), pointer :: UV
+#include "associate_part_def.h"
+#include "associate_mesh_def.h"
+#include "associate_part_ass.h"
+#include "associate_mesh_ass.h"
+ UV => dynamics%uv(:,:,:)
+
+ !rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered
+ !rossby_num=2
+
+ ed=myDim_elem2D+eDim_elem2D
+ allocate(uuu(ed))
+
+ uke_back=0.0_WP
+ uke_dis=0.0_WP
+ DO ed=1, myDim_elem2D
+ DO nz=1, nlevels(ed)-1
+ uke_dis(nz,ed) =(UV(1,nz,ed)*UV_dis_tend( 1,nz,ed)+UV(2,nz,ed)*UV_dis_tend( 2,nz,ed))
+ uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed))
+ END DO
+ END DO
+
+ DO nz=1,nl-1
+ uuu=0.0_8
+ uuu=uke_back(nz,:)
+ call smooth_elem(uuu,smooth_back, partit, mesh) !3) ?
+ uke_back(nz,:)=uuu
+ END DO
+
+ !Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice
+ !Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic
+ !uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed)
+ ed=myDim_elem2D+eDim_elem2D
+ allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D))
+ allocate(work_uv(myDim_nod2D+eDim_nod2D))
+ allocate(rosb_array(nl-1,ed))
+ call exchange_elem(UV, partit)
+ rosb_array=0._WP
+ DO nz=1, nl-1
+ work_uv=0._WP
+ DO node=1, myDim_nod2D
+ vol=0._WP
+ U_work(nz,node)=0._WP
+ V_work(nz,node)=0._WP
+ DO j=1, nod_in_elem2D_num(node)
+ elem=nod_in_elem2D(j, node)
+ U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem)
+ V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem)
+ vol=vol+elem_area(elem)
+ END DO
+ U_work(nz,node)=U_work(nz,node)/vol
+ V_work(nz,node)=V_work(nz,node)/vol ! NOTE(review): was U_work(nz,node)/vol - copy-paste bug that clobbered the meridional average
+ END DO
+ work_uv=U_work(nz,:)
+ call exchange_nod(work_uv, partit)
+ U_work(nz,:)=work_uv
+ work_uv=V_work(nz,:)
+ call exchange_nod(work_uv, partit)
+ V_work(nz,:)=work_uv
+ END DO
+
+ DO el=1,myDim_elem2D
+ DO nz=1, nlevels(el)-1
+ 
rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& + sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& + (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& + sum(gradient_sca(1:3, el)*V_work(nz,elem2D_nodes(1:3,el))))**2) + ! hall=hall+hnz + END DO + ! rosb_array(el)=rosb_array(el)/hall + END DO + + DO ed=1, myDim_elem2D + scaling=1._WP + IF(uke_scaling) then + reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) + rosb=0._wp + elnodes=elem2D_nodes(:, ed) + DO kk=1,3 + c1=0._wp + nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) + !Vertical average; same scaling in the vertical + DO nz=1, nzmax-1 + c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. + END DO + c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min + !Cutoff K_GM depending on (Resolution/Rossby radius) ratio + rosb=rosb+min(c1/max(abs(mesh%coriolis_node(elnodes(kk))), f_min), r_max) + END DO + rosb=rosb/3._WP + scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) + END IF + + DO nz=1, nlevels(ed)-1 + elnodes=elem2D_nodes(:,ed) + + !Taking out that one place where it is always weird (Pacific Southern Ocean) + !Should not really be used later on, once we fix the issue with the 1/4 degree grid + if(.not. 
(TRIM(which_toy)=="soufflet")) then + call elem_center(ed, ex, ey) + !a1=-104.*rad + !a2=-49.*rad + call g2r(-104.*rad, -49.*rad, a1, a2) + dist_reg(1)=ex-a1 + dist_reg(2)=ey-a2 + call trim_cyclic(dist_reg(1)) + dist_reg(1)=dist_reg(1)*elem_cos(ed) + dist_reg=dist_reg*r_earth + len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) + + !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 + rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(mesh%coriolis_node(elnodes(:)))), f_min) + !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) + uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._WP/(1._WP+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) + else + rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(mesh%coriolis_node(elnodes(:)))), f_min) + !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) + uke_dis(nz,ed)=scaling*1._WP/(1._WP+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) + end if + END DO + END DO + + deallocate(U_work, V_work) + deallocate(rosb_array) + deallocate(work_uv) + + call exchange_elem(uke_dis, partit) + DO nz=1, nl-1 + uuu=uke_dis(nz,:) + call smooth_elem(uuu,smooth_dis, partit, mesh) + uke_dis(nz,:)=uuu + END DO + DO ed=1, myDim_elem2D + DO nz=1,nlevels(ed)-1 + uke_rhs_old(nz,ed)=uke_rhs(nz,ed) + uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) + uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) + END DO + END DO + + call exchange_elem(uke, partit) + deallocate(uuu) + + end subroutine uke_update +end module g_backscatter + diff --git a/src/gen_modules_clock.F90 b/src/gen_modules_clock.F90 index b90aa0a0d..40f9abc31 100755 --- a/src/gen_modules_clock.F90 +++ b/src/gen_modules_clock.F90 @@ -65,12 +65,14 @@ end subroutine clock ! !-------------------------------------------------------------------------------- ! 
- subroutine clock_init - use g_parsup + subroutine clock_init(partit) + USE MOD_PARTIT + USE MOD_PARSUP use g_config implicit none - integer :: i, daystart, yearstart - real(kind=WP) :: aux1, aux2, timestart + type(t_partit), intent(in), target :: partit + integer :: i, daystart, yearstart + real(kind=WP) :: aux1, aux2, timestart ! the model initialized at timestart=timenew @@ -123,18 +125,18 @@ subroutine clock_init aux1=aux2 end do - if(mype==0) then + if(partit%mype==0) then if(r_restart) then write(*,*) print *, achar(27)//'[31m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[5;7;31m'//' --> THIS IS A RESTART RUN !!! '//achar(27)//'[0m' - write(*,"(A, F5.2, I4, I5)") ' > clock restarted at time:', timenew, daynew, yearnew + write(*,"(A, F8.2, I4, I5)") ' > clock restarted at time:', timenew, daynew, yearnew write(*,*) else write(*,*) print *, achar(27)//'[32m' //'____________________________________________________________'//achar(27)//'[0m' print *, achar(27)//'[7;32m'//' --> THIS IS A INITIALISATION RUN !!! '//achar(27)//'[0m' - write(*,"(A, F5.2, I4, I5)")' > clock initialized at time:', timenew, daynew, yearnew + write(*,"(A, F8.2, I4, I5)")' > clock initialized at time:', timenew, daynew, yearnew write(*,*) end if end if diff --git a/src/gen_modules_config.F90 b/src/gen_modules_config.F90 index df06463bb..dabd1580c 100755 --- a/src/gen_modules_config.F90 +++ b/src/gen_modules_config.F90 @@ -34,10 +34,16 @@ module g_config ! *** restart_log *** integer :: logfile_outfreq=1 ! logfile info. outp. 
freq., # steps integer :: restart_length=1 - character :: restart_length_unit='m' + character(3) :: restart_length_unit='m' + integer :: raw_restart_length=1 + character(3) :: raw_restart_length_unit='m' + integer :: bin_restart_length=1 + character(3) :: bin_restart_length_unit='m' - namelist /restart_log/ restart_length, restart_length_unit, logfile_outfreq - + namelist /restart_log/ restart_length , restart_length_unit, & + raw_restart_length, raw_restart_length_unit, & + bin_restart_length, bin_restart_length_unit, & + logfile_outfreq !_____________________________________________________________________________ ! *** ale_def *** ! Which ALE case to use : 'linfs', 'zlevel', 'zstar' @@ -80,14 +86,23 @@ module g_config real(kind=WP) :: gammaEuler=-90. ! then around new z. ! Set to zeros to work with ! geographical coordinates - character(len=5) :: which_depth_n2e='mean' - namelist /geometry/ cartesian, fplane, & - cyclic_length, rotated_grid, alphaEuler, betaEuler, gammaEuler, force_rotation, which_depth_n2e + integer :: thers_zbar_lev=5 ! minimum number of levels to be + character(len=5) :: which_depth_n2e='mean' + + logical :: use_depthonelem =.false. + character(len=10) :: use_depthfile='aux3d' ! 'aux3d', 'depth@' + logical :: use_cavityonelem=.false. + + namelist /geometry/ cartesian, fplane, & + cyclic_length, rotated_grid, force_rotation, & + alphaEuler, betaEuler, gammaEuler, & + which_depth_n2e, use_depthonelem, use_cavityonelem, use_depthfile !_____________________________________________________________________________ ! *** fleap_year *** logical :: include_fleapyear=.false. - namelist /calendar/ include_fleapyear + logical :: use_flpyrcheck =.true. + namelist /calendar/ include_fleapyear, use_flpyrcheck !_____________________________________________________________________________ ! 
*** machine *** diff --git a/src/gen_modules_cvmix_idemix.F90 b/src/gen_modules_cvmix_idemix.F90 index 9259b0649..8a772f2b0 100644 --- a/src/gen_modules_cvmix_idemix.F90 +++ b/src/gen_modules_cvmix_idemix.F90 @@ -27,7 +27,8 @@ module g_cvmix_idemix use g_config , only: dt use o_param use mod_mesh - use g_parsup + USE MOD_PARTIT + USE MOD_PARSUP use o_arrays use g_comm_auto use g_read_other_NetCDF @@ -37,7 +38,8 @@ module g_cvmix_idemix !___________________________________________________________________________ ! OCECTL/CVMIX_IDEMIX_PARAM namelist parameters ! time scale for vertical symmetrisation (sec) - real(kind=WP) :: idemix_tau_v = 86400.0 + ! real(kind=WP) :: idemix_tau_v = 86400.0 ! old + real(kind=WP) :: idemix_tau_v = 172800.0 ! from Pollman et al. (2017), use 2days ! time scale for horizontal symmetrisation, only necessary for lateral diffusion (sec) real(kind=WP) :: idemix_tau_h = 1296000.0 @@ -46,25 +48,31 @@ module g_cvmix_idemix real(kind=WP) :: idemix_gamma = 1.570 ! spectral bandwidth in modes (dimensionless) - real(kind=WP) :: idemix_jstar = 10.0 + ! real(kind=WP) :: idemix_jstar = 10.0 ! old + real(kind=WP) :: idemix_jstar = 5.0 ! from Pollman et al. (2017) ! dissipation parameter (dimensionless) - real(kind=WP) :: idemix_mu0 = 1.33333333 + ! real(kind=WP) :: idemix_mu0 = 1.33333333 ! old + real(kind=WP) :: idemix_mu0 = 0.33333333 ! from Pollman et al. (2017), use 2days ! amount of surface forcing that is used real(kind=WP) :: idemix_sforcusage = 0.2 - integer :: idemix_n_hor_iwe_prop_iter = 1 + ! integer :: idemix_n_hor_iwe_prop_iter = 1 ! old + integer :: idemix_n_hor_iwe_prop_iter = 5 ! from Pollman et al. (2017) ! filelocation for idemix surface forcing character(MAX_PATH):: idemix_surforc_file = './fourier_smooth_2005_cfsr_inert_rgrid.nc' - + character(MAX_PATH):: idemix_surforc_vname= 'var706' + ! 
filelocation for idemix bottom forcing character(MAX_PATH):: idemix_botforc_file = './tidal_energy_gx1v6_20090205_rgrid.nc' - + character(MAX_PATH):: idemix_botforc_vname= 'wave_dissipation' + namelist /param_idemix/ idemix_tau_v, idemix_tau_h, idemix_gamma, idemix_jstar, idemix_mu0, idemix_n_hor_iwe_prop_iter, & - idemix_sforcusage, idemix_surforc_file, idemix_botforc_file + idemix_sforcusage, idemix_surforc_file, idemix_surforc_vname, & + idemix_botforc_file, idemix_botforc_vname @@ -114,15 +122,18 @@ module g_cvmix_idemix !=========================================================================== ! allocate and initialize IDEMIX variables --> call initialisation ! routine from cvmix library - subroutine init_cvmix_idemix(mesh) + subroutine init_cvmix_idemix(partit, mesh) implicit none character(len=cvmix_strlen) :: nmlfile logical :: file_exist=.False. integer :: node_size - type(t_mesh), intent(in), target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !_______________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -208,8 +219,8 @@ subroutine init_cvmix_idemix(mesh) write(*,*) " idemix_jstar = ", idemix_jstar write(*,*) " idemix_mu0 = ", idemix_mu0 write(*,*) " idemix_n_hor_iwe_...= ", idemix_n_hor_iwe_prop_iter - write(*,*) " idemix_surforc_file = ", idemix_surforc_file - write(*,*) " idemix_botforc_file = ", idemix_botforc_file + write(*,*) " idemix_surforc_file = ", trim(idemix_surforc_file) + write(*,*) " idemix_botforc_file = ", trim(idemix_botforc_file) write(*,*) end if @@ -220,7 +231,7 @@ subroutine init_cvmix_idemix(mesh) inquire(file=trim(idemix_surforc_file),exist=file_exist) if (file_exist) then if (mype==0) write(*,*) ' --> read 
IDEMIX near inertial wave surface forcing' - call read_other_NetCDF(idemix_surforc_file, 'var706', 1, forc_iw_surface_2D, .true., mesh) + call read_other_NetCDF(idemix_surforc_file, idemix_surforc_vname, 1, forc_iw_surface_2D, .true., partit, mesh) ! only 20% of the niw-input are available to penetrate into the deeper ocean forc_iw_surface_2D = forc_iw_surface_2D/density_0 * idemix_sforcusage @@ -233,7 +244,7 @@ subroutine init_cvmix_idemix(mesh) write(*,*) ' idemix_botforc_file' write(*,*) '____________________________________________________________________' end if - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if !_______________________________________________________________________ @@ -243,7 +254,7 @@ subroutine init_cvmix_idemix(mesh) inquire(file=trim(idemix_surforc_file),exist=file_exist) if (file_exist) then if (mype==0) write(*,*) ' --> read IDEMIX near tidal bottom forcing' - call read_other_NetCDF(idemix_botforc_file, 'wave_dissipation', 1, forc_iw_bottom_2D, .true., mesh) + call read_other_NetCDF(idemix_botforc_file, idemix_botforc_vname, 1, forc_iw_bottom_2D, .true., partit, mesh) ! convert from W/m^2 to m^3/s^3 forc_iw_bottom_2D = forc_iw_bottom_2D/density_0 @@ -256,7 +267,7 @@ subroutine init_cvmix_idemix(mesh) write(*,*) ' idemix_botforc_file' write(*,*) '____________________________________________________________________' end if - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if !_______________________________________________________________________ @@ -268,17 +279,21 @@ end subroutine init_cvmix_idemix ! !=========================================================================== ! 
calculate IDEMIX internal wave energy and its dissipation - subroutine calc_cvmix_idemix(mesh) + subroutine calc_cvmix_idemix(partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: node, elem, edge, node_size - integer :: nz, nln, nl1, nl2, nl12, nu1, nu2, nu12 + integer :: nz, nln, nl1, nl2, nl12, nu1, nu2, nu12, uln integer :: elnodes1(3), elnodes2(3), el(2), ednodes(2) real(kind=WP) :: dz_trr(mesh%nl), dz_trr2(mesh%nl), bvfreq2(mesh%nl), vflux, dz_el, aux, cflfac real(kind=WP) :: grad_v0Eiw(2), deltaX1, deltaY1, deltaX2, deltaY2 logical :: debug=.false. -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! nils tstep_count = tstep_count + 1 @@ -286,20 +301,21 @@ subroutine calc_cvmix_idemix(mesh) node_size = myDim_nod2D do node = 1,node_size nln = nlevels_nod2D(node)-1 + uln = ulevels_nod2D(node) !___________________________________________________________________ ! calculate for TKE square of Brünt-Väisälä frequency, be aware that ! bvfreq contains already the squared brünt Väisälä frequency ... - bvfreq2 = 0.0_WP - bvfreq2(2:nln) = bvfreq(2:nln,node) + bvfreq2 = 0.0_WP + bvfreq2(uln:nln) = bvfreq(uln:nln,node) !___________________________________________________________________ ! dz_trr distance between tracer points, surface and bottom dz_trr is half ! the layerthickness ... - dz_trr = 0.0_WP - dz_trr(2:nln) = abs(Z_3d_n(1:nln-1,node)-Z_3d_n(2:nln,node)) - dz_trr(1) = hnode(1,node)/2.0_WP - dz_trr(nln+1) = hnode(nln,node)/2.0_WP + dz_trr = 0.0_WP + dz_trr(uln+1:nln)= abs(Z_3d_n(uln:nln-1,node)-Z_3d_n(uln+1:nln,node)) + dz_trr(uln) = hnode(uln,node)/2.0_WP + dz_trr(nln+1) = hnode(nln,node)/2.0_WP !___________________________________________________________________ ! 
main call to calculate idemix @@ -307,41 +323,42 @@ subroutine calc_cvmix_idemix(mesh) call cvmix_coeffs_idemix(& ! parameter - dzw = hnode(:,node), & - dzt = dz_trr(:), & - nlev = nln, & - max_nlev = nl-1, & - dtime = dt, & - coriolis = coriolis_node(node), & + dzw = hnode(uln:nln,node), & + dzt = dz_trr(uln:nln+1), & +! nlev = nln, & + nlev = nln-uln+1, & + max_nlev = nl-1, & + dtime = dt, & + coriolis = mesh%coriolis_node(node), & ! essentials - iwe_new = iwe(:,node), & ! out - iwe_old = iwe_old(:), & ! in - forc_iw_surface = forc_iw_surface_2D(node), & ! in - forc_iw_bottom = forc_iw_bottom_2D(node), & ! in + iwe_new = iwe(uln:nln+1,node), & ! out + iwe_old = iwe_old(uln:nln+1), & ! in + forc_iw_surface = forc_iw_surface_2D(node), & ! in + forc_iw_bottom = forc_iw_bottom_2D(node), & ! in ! FIXME: nils: better output IDEMIX Ri directly - alpha_c = iwe_alpha_c(:,node), & ! out (for Ri IMIX) + alpha_c = iwe_alpha_c(uln:nln+1,node), & ! out (for Ri IMIX) ! only for Osborn shortcut ! FIXME: nils: put this to cvmix_tke - KappaM_out = iwe_Av(:,node), & ! out - KappaH_out = iwe_Kv(:,node), & ! out - Nsqr = bvfreq2(:), & ! in + KappaM_out = iwe_Av( uln:nln+1,node), & ! out + KappaH_out = iwe_Kv( uln:nln+1,node), & ! out + Nsqr = bvfreq2( uln:nln+1), & ! in ! diagnostics - iwe_Ttot = iwe_Ttot(:,node), & - iwe_Tdif = iwe_Tdif(:,node), & - iwe_Thdi = iwe_Thdi(:,node), & - iwe_Tdis = iwe_Tdis(:,node), & - iwe_Tsur = iwe_Tsur(:,node), & - iwe_Tbot = iwe_Tbot(:,node), & - c0 = iwe_c0(:,node), & - v0 = iwe_v0(:,node), & + iwe_Ttot = iwe_Ttot(uln:nln+1,node), & + iwe_Tdif = iwe_Tdif(uln:nln+1,node), & + iwe_Thdi = iwe_Thdi(uln:nln+1,node), & + iwe_Tdis = iwe_Tdis(uln:nln+1,node), & + iwe_Tsur = iwe_Tsur(uln:nln+1,node), & + iwe_Tbot = iwe_Tbot(uln:nln+1,node), & + c0 = iwe_c0( uln:nln+1,node), & + v0 = iwe_v0( uln:nln+1,node), & ! 
debugging - debug = debug, & + debug = debug, & !i = i, & !j = j, & !tstep_count = tstep_count, & - cvmix_int_1 = cvmix_dummy_1(:,node), & - cvmix_int_2 = cvmix_dummy_2(:,node), & - cvmix_int_3 = cvmix_dummy_3(:,node) & + cvmix_int_1 = cvmix_dummy_1(uln:nln+1,node), & + cvmix_int_2 = cvmix_dummy_2(uln:nln+1,node), & + cvmix_int_3 = cvmix_dummy_3(uln:nln+1,node) & ) end do !-->do node = 1,node_size @@ -362,66 +379,71 @@ subroutine calc_cvmix_idemix(mesh) ! make boundary exchange for iwe, and iwe_v0 --> for propagation need ! to calculate edge contribution that crosses the halo - call exchange_nod(iwe) - - !___________________________________________________________________ - ! calculate inverse volume and restrict iwe_v0 to fullfill stability - ! criterium --> CFL - ! CFL Diffusion : CFL = v0^2 * dt/dx^2, CFL < 0.5 - ! --> limit v0 to CFL=0.2 - ! --> v0 = sqrt(CFL * dx^2 / dt) - cflfac = 0.2_WP - ! |--> FROM NILS: "fac=0.2 ist geschätzt. Würde ich erstmal so - ! probieren. Der kommt aus dem stabilitätskriterium für Diffusion - ! (ähnlich berechnet wie das CFL Kriterium nur halt für den - ! Diffusions anstatt für den Advektionsterm). Normalerweise - ! sollte der Grenzwert aber nicht zu oft auftreten. Ich hatte - ! mal damit rum-experimentiert, aber letztendlich war die Lösung - ! das Iterativ zu machen und ggf. idemix_n_hor_iwe_prop_iter zu erhöhen. - ! Du kannst IDEMIX erstmal ohne den Term ausprobieren und sehen, - ! ob es läuft, dann kannst du den dazuschalten und hoffen, dass - ! es nicht explodiert. Eigentlich sollte der Term alles glatter - ! machen, aber nahe der ML kann der schon Probleme machen". - do node = 1,node_size + call exchange_nod(iwe, partit) - ! temporarily store old iwe values for diag - iwe_Thdi(:,node) = iwe(:,node) - - ! number of above bottom levels at node - nln = nlevels_nod2D(node)-1 - - ! 
thickness of mid-level to mid-level interface at node - dz_trr = 0.0_WP - dz_trr(2:nln) = Z_3d_n(1:nln-1,node)-Z_3d_n(2:nln,node) - dz_trr(1) = hnode(1,node)/2.0_WP - dz_trr(nln+1) = hnode(nln,node)/2.0_WP - - ! surface cell - vol_wcelli(1,node) = 1/(area(1,node)*dz_trr(1)) - aux = sqrt(cflfac*(area(1,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) - iwe_v0(1,node) = min(iwe_v0(1,node),aux) - - ! bulk cells - !!PS do nz=2,nln - do nz=ulevels_nod2D(node)+1,nln - ! inverse volumne - vol_wcelli(nz,node) = 1/(area(nz-1,node)*dz_trr(nz)) - - ! restrict iwe_v0 - aux = sqrt(cflfac*(area(nz-1,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) - ! `--------+-------------´ - ! |-> comes from mesh_resolution=sqrt(area(1, :)/pi)*2._WP - iwe_v0(nz,node) = min(iwe_v0(nz,node),aux) - end do - - ! bottom cell - vol_wcelli(nln+1,node) = 1/(area(nln,node)*dz_trr(nln+1)) - aux = sqrt(cflfac*(area(nln,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) - iwe_v0(nln+1,node) = min(iwe_v0(nln+1,node),aux) - - end do !-->do node = 1,node_size - call exchange_nod(vol_wcelli) - call exchange_nod(iwe_v0) +! ! !___________________________________________________________________ +! ! ! calculate inverse volume and restrict iwe_v0 to fullfill stability +! ! ! criterium --> CFL +! ! ! CFL Diffusion : CFL = v0^2 * dt/dx^2, CFL < 0.5 +! ! ! --> limit v0 to CFL=0.2 +! ! ! --> v0 = sqrt(CFL * dx^2 / dt) +! ! cflfac = 0.2_WP +! ! ! |--> FROM NILS: "fac=0.2 ist geschätzt. Würde ich erstmal so +! ! ! probieren. Der kommt aus dem stabilitätskriterium für Diffusion +! ! ! (ähnlich berechnet wie das CFL Kriterium nur halt für den +! ! ! Diffusions anstatt für den Advektionsterm). Normalerweise +! ! ! sollte der Grenzwert aber nicht zu oft auftreten. Ich hatte +! ! ! mal damit rum-experimentiert, aber letztendlich war die Lösung +! ! ! das Iterativ zu machen und ggf. idemix_n_hor_iwe_prop_iter zu erhöhen. +! ! ! 
Du kannst IDEMIX erstmal ohne den Term ausprobieren und sehen, +! ! ! ob es läuft, dann kannst du den dazuschalten und hoffen, dass +! ! ! es nicht explodiert. Eigentlich sollte der Term alles glatter +! ! ! machen, aber nahe der ML kann der schon Probleme machen". +! ! +! ! ! temporarily store old iwe values for diag +! ! iwe_Thdi = iwe +! ! +! ! do node = 1,node_size +! ! +! ! ! temporarily store old iwe values for diag +! ! iwe_Thdi(:,node) = iwe(:,node) +! ! +! ! ! number of above bottom levels at node +! ! nln = nlevels_nod2D(node)-1 +! ! uln = ulevels_nod2D(node) +! ! +! ! ! thickness of mid-level to mid-level interface at node +! ! dz_trr = 0.0_WP +! ! dz_trr(uln+1:nln) = Z_3d_n(uln:nln-1,node)-Z_3d_n(uln+1:nln,node) +! ! dz_trr(uln) = hnode(uln,node)/2.0_WP +! ! dz_trr(nln+1) = hnode(nln,node)/2.0_WP +! ! +! ! ! surface cell +! ! vol_wcelli(uln,node) = 1/(areasvol(uln,node)*dz_trr(uln)) +! ! aux = sqrt(cflfac*(area(uln,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) +! ! iwe_v0(uln,node) = min(iwe_v0(uln,node),aux) +! ! +! ! ! bulk cells +! ! !!PS do nz=2,nln +! ! do nz=uln+1,nln +! ! ! inverse volumne +! ! vol_wcelli(nz,node) = 1/(areasvol(nz-1,node)*dz_trr(nz)) +! ! +! ! ! restrict iwe_v0 +! ! aux = sqrt(cflfac*(area(nz-1,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) +! ! ! `--------+-------------´ +! ! ! |-> comes from mesh_resolution=sqrt(area(1, :)/pi)*2._WP +! ! iwe_v0(nz,node) = min(iwe_v0(nz,node),aux) +! ! end do +! ! +! ! ! bottom cell +! ! vol_wcelli(nln+1,node) = 1/(areasvol(nln,node)*dz_trr(nln+1)) +! ! aux = sqrt(cflfac*(area(nln,node)/pi*4.0_WP)/(idemix_tau_h*dt/idemix_n_hor_iwe_prop_iter)) +! ! iwe_v0(nln+1,node) = min(iwe_v0(nln+1,node),aux) +! ! +! ! end do !-->do node = 1,node_size +! ! call exchange_nod(vol_wcelli, partit) + call exchange_nod(iwe_v0, partit) !___________________________________________________________________ ! 
calculate horizontal diffusion term for internal wave energy @@ -457,7 +479,7 @@ subroutine calc_cvmix_idemix(mesh) ! thickness of mid-level to mid-level interface of element el(1) dz_trr = 0.0_WP - dz_trr(1) = helem(1,el(1))/2.0_WP + dz_trr(nu1) = helem(nu1,el(1))/2.0_WP !!PS do nz=2,nl1-1 do nz=nu1+1,nl1-1 dz_trr(nz) = helem(nz-1,el(1))/2.0_WP + helem(nz,el(1))/2.0_WP @@ -478,7 +500,7 @@ subroutine calc_cvmix_idemix(mesh) ! thickness of mid-level to mid-level interface of element el(2) dz_trr2 = 0.0_WP - dz_trr2(1) = helem(1,el(2))/2.0_WP + dz_trr2(nu2) = helem(nu2,el(2))/2.0_WP !!PS do nz=2,nl2-1 do nz=nu2+1,nl2-1 dz_trr2(nz) = helem(nz-1,el(2))/2.0_WP + helem(nz,el(2))/2.0_WP @@ -664,12 +686,12 @@ subroutine calc_cvmix_idemix(mesh) if(mix_scheme_nmb==6) then !___________________________________________________________________ ! write out diffusivity - call exchange_nod(iwe_Kv) + call exchange_nod(iwe_Kv, partit) Kv = iwe_Kv !___________________________________________________________________ ! write out viscosity -->interpolate therefor from nodes to elements - call exchange_nod(iwe_Av) !Warning: don't forget to communicate before averaging on elements!!! + call exchange_nod(iwe_Av, partit) !Warning: don't forget to communicate before averaging on elements!!! do elem=1, myDim_elem2D elnodes1=elem2D_nodes(:,elem) !!PS do nz=1,nlevels(elem)-1 diff --git a/src/gen_modules_cvmix_kpp.F90 b/src/gen_modules_cvmix_kpp.F90 index 823093fec..0f7d7916c 100644 --- a/src/gen_modules_cvmix_kpp.F90 +++ b/src/gen_modules_cvmix_kpp.F90 @@ -22,11 +22,14 @@ module g_cvmix_kpp ! module calls from FESOM use g_config use o_param - use mod_mesh - use g_parsup + USE MOD_ICE + USE MOD_DYN + USE mod_tracer + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH use o_arrays use g_comm_auto - use i_arrays use g_forcing_arrays use g_support use o_mixing_KPP_mod @@ -218,13 +221,18 @@ module g_cvmix_kpp !=========================================================================== ! 
allocate and initialize CVMIX KPP variables --> call initialisation ! routine from cvmix library - subroutine init_cvmix_kpp(mesh) + subroutine init_cvmix_kpp(partit, mesh) implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit character(len=MAX_PATH) :: nmlfile logical :: nmlfile_exist=.False. integer :: node_size - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + integer fileunit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !_______________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -272,9 +280,9 @@ subroutine init_cvmix_kpp(mesh) ! check if cvmix namelist file exists if not use default values inquire(file=trim(nmlfile),exist=nmlfile_exist) if (nmlfile_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_kpp) - close(20) + open(newunit=fileunit,file=trim(nmlfile)) + read(fileunit,nml=param_kpp) + close(fileunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if @@ -341,8 +349,13 @@ end subroutine init_cvmix_kpp ! !=========================================================================== ! 
calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_kpp(mesh) - type(t_mesh), intent(in) , target :: mesh + subroutine calc_cvmix_kpp(ice, dynamics, tracers, partit, mesh) + type(t_ice) , intent(in), target :: ice + type(t_dyn) , intent(in), target :: dynamics + type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !_______________________________________________________________________ integer :: node, elem, nz, nln, nun, elnodes(3), aux_nz real(kind=WP) :: vshear2, dz2, aux, aux_wm(mesh%nl), aux_ws(mesh%nl) real(kind=WP) :: aux_coeff, sigma, stable @@ -352,7 +365,20 @@ subroutine calc_cvmix_kpp(mesh) real(kind=WP) :: sldepth, sfc_temp, sfc_salt, sfc_u, sfc_v, htot, delh, rho_sfc, rho_nz real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 real(kind=WP) :: sfc_rhopot, sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2 -#include "associate_mesh.h" + !_______________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice + real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP), dimension(:,:,:), pointer :: UVnode +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) + UVnode=>dynamics%uvnode(:,:,:) + a_ice => ice%data(1)%values(:) + !_______________________________________________________________________ kpp_Av = 0.0_WP kpp_Kv = 0.0_WP @@ -388,15 +414,15 @@ subroutine calc_cvmix_kpp(mesh) !___________________________________________________________ ! calculate squared velocity shear referenced to the surface ! 
--> cvmix wants to have it with respect to the midlevel rather than full levels - !!PS kpp_dvsurf2(nz) = ((Unode(1,nz-1,node)+Unode(1,nz,node))*0.5 - Unode( 1,1,node) )**2 + & - !!PS ((Unode(2,nz-1,node)+Unode(2,nz,node))*0.5 - Unode( 2,1,node) )**2 - kpp_dvsurf2(nz) = ((Unode(1,nz-1,node)+Unode(1,nz,node))*0.5 - Unode( 1,nun,node) )**2 + & - ((Unode(2,nz-1,node)+Unode(2,nz,node))*0.5 - Unode( 2,nun,node) )**2 + !!PS kpp_dvsurf2(nz) = ((UVnode(1,nz-1,node)+UVnode(1,nz,node))*0.5 - UVnode( 1,1,node) )**2 + & + !!PS ((UVnode(2,nz-1,node)+UVnode(2,nz,node))*0.5 - UVnode( 2,1,node) )**2 + kpp_dvsurf2(nz) = ((UVnode(1,nz-1,node)+UVnode(1,nz,node))*0.5 - UVnode( 1,nun,node) )**2 + & + ((UVnode(2,nz-1,node)+UVnode(2,nz,node))*0.5 - UVnode( 2,nun,node) )**2 !___________________________________________________________ ! calculate shear Richardson number Ri = N^2/(du/dz)^2 dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 + & - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 + & + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 vshear2 = vshear2/dz2 kpp_shearRi(nz) = max(bvfreq(nz,node),0.0_WP)/(vshear2+kpp_epsln) @@ -404,9 +430,9 @@ subroutine calc_cvmix_kpp(mesh) ! buoyancy difference with respect to the surface --> computed in ! oce_ale_pressure_bf.F90 --> subroutine pressure_bv ! 
--> dbsfc(nz,node) - !!PS call densityJM_components(tr_arr(1,node,1), tr_arr(1,node,2), sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2, sfc_rhopot, mesh) - call densityJM_components(tr_arr(nun,node,1), tr_arr(nun,node,2), sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2, sfc_rhopot, mesh) - call densityJM_components(tr_arr(nz,node,1), tr_arr(nz,node,2), bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + !!PS call densityJM_components(temp(1,node), salt(1,node), sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2, sfc_rhopot, mesh) + call densityJM_components(temp(nun,node), salt(nun,node), sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2, sfc_rhopot, mesh) + call densityJM_components(temp(nz, node), salt(nz, node), bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) rho_nz = bulk_0 + Z_3d_n(nz,node)*(bulk_pz + Z_3d_n(nz,node)*bulk_pz2) rho_nz = rho_nz*rhopot/(rho_nz+0.1_WP*Z_3d_n(nz,node))-density_0 rho_sfc = sfc_bulk_0 + Z_3d_n(nz,node)*(sfc_bulk_pz + Z_3d_n(nz,node)*sfc_bulk_pz2) @@ -441,10 +467,10 @@ subroutine calc_cvmix_kpp(mesh) do nztmp = nun, nzsfc delh = min( max(0.0_WP,sldepth-htot), hnode(nztmp,node) ) htot = htot+delh - sfc_temp = sfc_temp + tr_arr(nztmp,node,1)*delh - sfc_salt = sfc_salt + tr_arr(nztmp,node,2)*delh - sfc_u = sfc_u + Unode(1,nztmp,node) *delh - sfc_v = sfc_v + Unode(2,nztmp,node) *delh + sfc_temp = sfc_temp + temp(nztmp,node)*delh + sfc_salt = sfc_salt + salt(nztmp,node)*delh + sfc_u = sfc_u + UVnode(1,nztmp,node) *delh + sfc_v = sfc_v + UVnode(2,nztmp,node) *delh end do sfc_temp = sfc_temp/htot sfc_salt = sfc_salt/htot @@ -454,8 +480,8 @@ subroutine calc_cvmix_kpp(mesh) !___________________________________________________________ ! calculate vertical shear between present layer and surface ! averaged sfc_u and sfc_v - kpp_dvsurf2(nz) = (Unode(1,nz,node)-sfc_u)**2 + & - (Unode(2,nz,node)-sfc_v)**2 + kpp_dvsurf2(nz) = (UVnode(1,nz,node)-sfc_u)**2 + & + (UVnode(2,nz,node)-sfc_v)**2 !___________________________________________________________ ! 
calculate buoyancy difference between the surface averaged @@ -464,7 +490,7 @@ subroutine calc_cvmix_kpp(mesh) ! depth level as the deep point --> than calculate bouyancy ! difference call densityJM_components(sfc_temp, sfc_salt, sfc_bulk_0, sfc_bulk_pz, sfc_bulk_pz2, sfc_rhopot, mesh) - call densityJM_components(tr_arr(nz,node,1), tr_arr(nz,node,2), bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + call densityJM_components(temp(nz,node), salt(nz,node), bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) rho_nz = bulk_0 + Z_3d_n(nz,node)*(bulk_pz + Z_3d_n(nz,node)*bulk_pz2) rho_nz = rho_nz*rhopot/(rho_nz+0.1_WP*Z_3d_n(nz,node))-density_0 rho_sfc = sfc_bulk_0 + Z_3d_n(nz,node)*(sfc_bulk_pz + Z_3d_n(nz,node)*sfc_bulk_pz2) @@ -478,8 +504,8 @@ subroutine calc_cvmix_kpp(mesh) ! calculate shear Richardson number Ri = N^2/(du/dz)^2 for ! mixing parameterisation below ocean boundary layer dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 + & - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 + & + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 vshear2 = vshear2/dz2 kpp_shearRi(nz) = max(bvfreq(nz,node),0.0_WP)/(vshear2+kpp_epsln) end do ! --> do nz=1, nln @@ -491,10 +517,10 @@ subroutine calc_cvmix_kpp(mesh) !!PS if (flag_debug .and. mype==0) print *, achar(27)//'[35m'//' --> call surface buyflux[0m' !!PS kpp_sbuoyflx(node) = -g * & !!PS (sw_alpha(1,node)*heat_flux( node) / vcpw + & !heat_flux & water_flux: positive up - !!PS sw_beta( 1,node)*water_flux(node)*tr_arr(1,node,2)) + !!PS sw_beta( 1,node)*water_flux(node)*salt(1,node,2)) kpp_sbuoyflx(node) = -g * & (sw_alpha(nun,node)*heat_flux( node) / vcpw + & !heat_flux & water_flux: positive up - sw_beta( nun,node)*water_flux(node)*tr_arr(nun,node,2)) + sw_beta( nun,node)*water_flux(node)*salt(nun,node)) ! 
calculate friction velocity (ustar) at surface (m/s) @@ -545,7 +571,7 @@ subroutine calc_cvmix_kpp(mesh) else write(*,*) " --> Error: this kpp_internalmix scheme is not supported" write(*,*) " for the mixing below the OBL, either KPP or PP !" - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end if !___________________________________________________________________ @@ -704,7 +730,7 @@ subroutine calc_cvmix_kpp(mesh) zt_cntr = Z_3d_n( nun:nln ,node), & ! (in) Height of cell centers (m) dim=(ke) surf_fric = aux_ustar, & ! (in) Turbulent friction velocity at surface (m/s) dim=1 surf_buoy = aux_surfbuoyflx_nl(1), & ! (in) Buoyancy flux at surface (m2/s3) dim=1 - Coriolis = coriolis_node(node), & ! (in) Coriolis parameter (1/s) dim=1 + Coriolis = mesh%coriolis_node(node), & ! (in) Coriolis parameter (1/s) dim=1 OBL_depth = kpp_obldepth(node), & ! (out) OBL depth (m) dim=1 kOBL_depth = kpp_nzobldepth(node) & ! (out) level (+fraction) of OBL extent dim=1 ) @@ -750,7 +776,7 @@ subroutine calc_cvmix_kpp(mesh) ! --> interpolate contribution that comes from shortwave penetration ! to the depth of the obldepth aux_surfbuoyflx_nl(1) = kpp_sbuoyflx(node) - if (use_sw_pene .and. kpp_use_fesomkpp .eqv. .true.) then + if (use_sw_pene .and. kpp_use_fesomkpp) then aux_nz = int(kpp_nzobldepth(node)) ! take only penetrated shortwave radiation heatflux into account ! that reached until the obldepth --> do linear interpolation @@ -765,7 +791,7 @@ subroutine calc_cvmix_kpp(mesh) ! MOM6 provides different option how buoyancy flux is influenced by ! short wave penetration flux ! --> mxl comes closest to what FESOM1.4 was doing - elseif (use_sw_pene .and. kpp_use_fesomkpp .eqv. .false.) then + elseif (use_sw_pene .and. (.not. kpp_use_fesomkpp)) then if (trim(kpp_sw_method) == 'all') then aux_surfbuoyflx_nl(1) = aux_surfbuoyflx_nl(1)+aux_coeff*sw_3d(1,node) elseif (trim(kpp_sw_method) == 'mxl') then @@ -889,14 +915,14 @@ subroutine calc_cvmix_kpp(mesh) ! 
original kpp parameterisation of FESOM1.4 & FESOM2.0 !!PS if (flag_debug .and. mype==0) print *, achar(27)//'[35m'//' --> calc smooth kpp_oblmixc'//achar(27)//'[0m' if (kpp_use_smoothblmc .and. kpp_use_fesomkpp) then - call exchange_nod(kpp_oblmixc(:,:,1)) - call exchange_nod(kpp_oblmixc(:,:,2)) - call exchange_nod(kpp_oblmixc(:,:,3)) + call exchange_nod(kpp_oblmixc(:,:,1), partit) + call exchange_nod(kpp_oblmixc(:,:,2), partit) + call exchange_nod(kpp_oblmixc(:,:,3), partit) do nz=1, 3 !_______________________________________________________________ ! all loops go over myDim_nod2D so no halo information --> for ! smoothing haloinfo is required --> therefor exchange_nod - call smooth_nod(kpp_oblmixc(:,:,nz), kpp_smoothblmc_nmb, mesh) + call smooth_nod(kpp_oblmixc(:,:,nz), kpp_smoothblmc_nmb, partit, mesh) end do end if @@ -929,13 +955,13 @@ subroutine calc_cvmix_kpp(mesh) !_______________________________________________________________________ ! write out diffusivities to FESOM2.0 --> diffusivities remain on nodes - call exchange_nod(kpp_Kv) + call exchange_nod(kpp_Kv, partit) Kv = kpp_Kv !_______________________________________________________________________ ! write out viscosities to FESOM2.0 --> viscosities for FESOM2.0 are ! 
defined on elements --> interpolate therefor from nodes to elements - call exchange_nod(kpp_Av) + call exchange_nod(kpp_Av, partit) Av = 0.0_WP do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) diff --git a/src/gen_modules_cvmix_pp.F90 b/src/gen_modules_cvmix_pp.F90 index 3722fcd8a..08b3f8b7d 100644 --- a/src/gen_modules_cvmix_pp.F90 +++ b/src/gen_modules_cvmix_pp.F90 @@ -25,10 +25,11 @@ module g_cvmix_pp use g_config use o_param use MOD_MESH - use g_parsup + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN use o_arrays use g_comm_auto - use i_arrays implicit none !___________________________________________________________________________ @@ -64,14 +65,18 @@ module g_cvmix_pp !=========================================================================== ! allocate and initialize CVMIX PP variables --> call initialisation ! routine from cvmix library - subroutine init_cvmix_pp(mesh) - use MOD_MESH + subroutine init_cvmix_pp(partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh - character(len=MAX_PATH) :: nmlfile - logical :: nmlfile_exist=.False. - integer :: node_size -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + character(len=MAX_PATH) :: nmlfile + logical :: nmlfile_exist=.False. + integer :: node_size + integer fileunit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !_______________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -97,9 +102,9 @@ subroutine init_cvmix_pp(mesh) ! 
check if cvmix namelist file exists if not use default values inquire(file=trim(nmlfile),exist=nmlfile_exist) if (nmlfile_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_pp) - close(20) + open(newunit=fileunit,file=trim(nmlfile)) + read(fileunit,nml=param_pp) + close(fileunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if @@ -157,13 +162,21 @@ end subroutine init_cvmix_pp ! !=========================================================================== ! calculate PP vertrical mixing coefficients from CVMIX library - subroutine calc_cvmix_pp(mesh) + subroutine calc_cvmix_pp(dynamics, partit, mesh) use MOD_MESH + implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn), intent(inout), target :: dynamics integer :: node, elem, nz, nln, nun, elnodes(3), windnl=2, node_size real(kind=WP) :: vshear2, dz2, Kvb -#include "associate_mesh.h" + real(kind=WP), dimension(:,:,:), pointer :: UVnode +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) node_size = myDim_nod2D !_______________________________________________________________________ do node = 1,node_size @@ -177,8 +190,8 @@ subroutine calc_cvmix_pp(mesh) !!PS do nz=2,nln do nz=nun+1,nln dz2 = (Z_3d_n( nz-1,node)-Z_3d_n( nz,node))**2 - vshear2 = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 +& - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + vshear2 = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 +& + (UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 vshear2 = vshear2/dz2 ! WIKIPEDIA: The Richardson number is always ! considered positive. A negative value of N² (i.e. complex N) @@ -247,13 +260,13 @@ subroutine calc_cvmix_pp(mesh) !_______________________________________________________________________ ! 
write out diffusivities to FESOM2.0 --> diffusivities remain on nodes - call exchange_nod(pp_Kv) + call exchange_nod(pp_Kv, partit) Kv = pp_Kv !_______________________________________________________________________ ! write out viscosities to FESOM2.0 --> viscosities for FESOM2.0 are ! defined on elements --> interpolate therefor from nodes to elements - call exchange_nod(pp_Av) + call exchange_nod(pp_Av, partit) Av = 0.0_WP do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) diff --git a/src/gen_modules_cvmix_tidal.F90 b/src/gen_modules_cvmix_tidal.F90 index 8a1e5fdc9..61ae43646 100644 --- a/src/gen_modules_cvmix_tidal.F90 +++ b/src/gen_modules_cvmix_tidal.F90 @@ -15,7 +15,8 @@ module g_cvmix_tidal use g_config , only: dt use o_param use mod_mesh - use g_parsup + USE MOD_PARTIT + USE MOD_PARSUP use o_arrays use g_comm_auto use g_read_other_NetCDF @@ -76,13 +77,19 @@ module g_cvmix_tidal !=========================================================================== ! allocate and initialize IDEMIX variables --> call initialisation ! routine from cvmix library - subroutine init_cvmix_tidal(mesh) + subroutine init_cvmix_tidal(partit, mesh) character(len=MAX_PATH) :: nmlfile logical :: file_exist=.False. integer :: node_size - type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer fileunit + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !_______________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -108,9 +115,9 @@ subroutine init_cvmix_tidal(mesh) file_exist=.False. 
inquire(file=trim(nmlfile),exist=file_exist) if (file_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_tidal) - close(20) + open(newunit=fileunit,file=trim(nmlfile)) + read(fileunit,nml=param_tidal) + close(fileunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if @@ -134,7 +141,7 @@ subroutine init_cvmix_tidal(mesh) inquire(file=trim(tidal_botforc_file),exist=file_exist) if (file_exist) then if (mype==0) write(*,*) ' --> read TIDAL near tidal bottom forcing' - call read_other_NetCDF(tidal_botforc_file, 'wave_dissipation', 1, tidal_forc_bottom_2D, .true., mesh) + call read_other_NetCDF(tidal_botforc_file, 'wave_dissipation', 1, tidal_forc_bottom_2D, .true., partit, mesh) !!PS ! convert from W/m^2 to m^3/s^3 !!PS tidal_forc_bottom_2D = tidal_forc_bottom_2D/density_0 ! --> the tidal energy for dissipation is divided by rho0 in @@ -148,7 +155,7 @@ subroutine init_cvmix_tidal(mesh) write(*,*) ' --> check your namelist.cvmix, tidal_botforc_file & ' write(*,*) '____________________________________________________________________' end if - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if !_______________________________________________________________________ @@ -165,14 +172,17 @@ end subroutine init_cvmix_tidal ! !=========================================================================== ! 
calculate TIDAL mixing parameterisation - subroutine calc_cvmix_tidal(mesh) - type(t_mesh), intent(in), target :: mesh + subroutine calc_cvmix_tidal(partit, mesh) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: node, elem, node_size integer :: nz, nln, nun integer :: elnodes(3) real(kind=WP) :: simmonscoeff, vertdep(mesh%nl) - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !_______________________________________________________________________ node_size = myDim_nod2D do node = 1,node_size @@ -226,13 +236,13 @@ subroutine calc_cvmix_tidal(mesh) ! ! MPIOM note 2: background diffusivities were already added in the mixed layer ! scheme (KPP) - call exchange_nod(tidal_Kv) + call exchange_nod(tidal_Kv, partit) Kv = Kv + tidal_Kv !_______________________________________________________________________ ! add tidal viscosity to main model diffusivity Av -->interpolate ! therefor from nodes to elements - call exchange_nod(tidal_Av) + call exchange_nod(tidal_Av, partit) do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) !!PS do nz=1,nlevels(elem)-1 diff --git a/src/gen_modules_cvmix_tke.F90 b/src/gen_modules_cvmix_tke.F90 index 1f6434812..2078cd88f 100644 --- a/src/gen_modules_cvmix_tke.F90 +++ b/src/gen_modules_cvmix_tke.F90 @@ -26,7 +26,9 @@ module g_cvmix_tke use g_config , only: dt use o_param use mod_mesh - use g_parsup + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN use o_arrays use g_comm_auto implicit none @@ -52,6 +54,9 @@ module g_cvmix_tke logical :: use_ubound_dirichlet = .false. logical :: use_lbound_dirichlet = .false. + logical :: tke_dolangmuir = .false. + real(kind=WP) :: tke_clangmuir = 0.3 + ! apply time relaxation to avo/dvo ! FIXME: nils: Do we need that logical :: timerelax_tke = .false. 
@@ -62,7 +67,7 @@ module g_cvmix_tke namelist /param_tke/ tke_c_k, tke_c_eps, tke_alpha, tke_mxl_min, tke_kappaM_min, tke_kappaM_max, & tke_cd, tke_surf_min, tke_min, tke_mxl_choice, & use_ubound_dirichlet, use_lbound_dirichlet, & - timerelax_tke, relne, relax + timerelax_tke, relne, relax, tke_dolangmuir, tke_clangmuir !___________________________________________________________________________ ! CVMIX-TKE 3D variables @@ -108,7 +113,13 @@ module g_cvmix_tke ! nils integer :: tstep_count - + + !___________________________________________________________________________ + ! Langmuir parameterisation + real(kind=WP), allocatable, dimension(:,:) :: tke_langmuir, langmuir_wlc + real(kind=WP), allocatable, dimension(:) :: langmuir_hlc, langmuir_ustoke + + contains ! ! @@ -116,13 +127,20 @@ module g_cvmix_tke !=========================================================================== ! allocate and initialize TKE 2D and 3D variables --> call initialisation ! routine from cvmix library - subroutine init_cvmix_tke(mesh) + subroutine init_cvmix_tke(partit, mesh) implicit none - character(len=cvmix_strlen) :: nmlfile - logical :: nmlfile_exist=.False. - integer :: node_size - type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + character(len=cvmix_strlen) :: nmlfile + logical :: nmlfile_exist=.False. + integer :: node_size + integer fileunit + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + !_______________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -197,9 +215,9 @@ subroutine init_cvmix_tke(mesh) ! 
check if cvmix namelist file exists if not use default values inquire(file=trim(nmlfile),exist=nmlfile_exist) if (nmlfile_exist) then - open(20,file=trim(nmlfile)) - read(20,nml=param_tke) - close(20) + open(newunit=fileunit,file=trim(nmlfile)) + read(fileunit,nml=param_tke) + close(fileunit) else write(*,*) ' could not find namelist.cvmix, will use default values !' end if @@ -218,9 +236,23 @@ subroutine init_cvmix_tke(mesh) write(*,*) " tke_kappaM_min = ", tke_kappaM_min write(*,*) " tke_kappaM_max = ", tke_kappaM_max write(*,*) " tke_mxl_choice = ", tke_mxl_choice + write(*,*) " tke_dolangmuir = ", tke_dolangmuir write(*,*) end if + !_______________________________________________________________________ + !langmuir parameterisation + if (tke_dolangmuir) then + allocate(tke_langmuir(nl,node_size)) + allocate(langmuir_wlc(nl,node_size)) + allocate(langmuir_hlc(node_size)) + allocate(langmuir_ustoke(node_size)) + tke_langmuir = 0.0_WP + langmuir_wlc = 0.0_WP + langmuir_hlc = 0.0_WP + langmuir_ustoke = 0.0_WP + end if + !_______________________________________________________________________ ! call tke initialisation routine from cvmix library call init_tke(c_k = tke_c_k, & @@ -234,6 +266,8 @@ subroutine init_cvmix_tke(mesh) use_ubound_dirichlet = use_ubound_dirichlet, & use_lbound_dirichlet = use_lbound_dirichlet, & only_tke = tke_only, & + l_lc = tke_dolangmuir, & + clc = tke_clangmuir, & tke_min = tke_min, & tke_surf_min = tke_surf_min ) end subroutine init_cvmix_tke @@ -242,16 +276,23 @@ end subroutine init_cvmix_tke ! !=========================================================================== ! 
calculate TKE vertical mixing coefficients from CVMIX library - subroutine calc_cvmix_tke(mesh) + subroutine calc_cvmix_tke(dynamics, partit, mesh) implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn), intent(inout), target :: dynamics integer :: node, elem, nelem, nz, nln, nun, elnodes(3), node_size - real(kind=WP) :: tvol + real(kind=WP) :: tvol, aux real(kind=WP) :: dz_trr(mesh%nl), bvfreq2(mesh%nl), vshear2(mesh%nl) real(kind=WP) :: tke_Av_old(mesh%nl), tke_Kv_old(mesh%nl), tke_old(mesh%nl) + real(kind=WP), dimension(:,:,:), pointer :: UVnode + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) -#include "associate_mesh.h" - node_size = myDim_nod2D !_______________________________________________________________________ ! calculate all neccessary forcing for TKE @@ -277,22 +318,17 @@ subroutine calc_cvmix_tke(mesh) ! calcualte for TKE surface momentum forcing --> norm of nodal ! surface wind stress --> tke_forc2d_normstress --> interpolate from elements ! to nodes - tvol = 0.0_WP - do nelem=1,nod_in_elem2D_num(node) - elem = nod_in_elem2D(nelem,node) - tvol = tvol + elem_area(elem) - tke_forc2d_normstress(node) = tke_forc2d_normstress(node) & - + sqrt(stress_surf(1,elem)**2 + stress_surf(2,elem)**2)*elem_area(elem)/density_0 - end do !--> do nelem=1,nod_in_elem2D_num(node) - tke_forc2d_normstress(node) = tke_forc2d_normstress(node)/tvol - + tke_forc2d_normstress(node) = sqrt( & + stress_node_surf(1,node)**2 + & + stress_node_surf(2,node)**2 & + )/density_0 + !___________________________________________________________________ ! 
calculate for TKE 3D vertical velocity shear vshear2=0.0_WP - !!PS do nz=2,nln do nz=nun+1,nln - vshear2(nz)=(( Unode(1, nz-1, node) - Unode(1, nz, node))**2 + & - ( Unode(2, nz-1, node) - Unode(2, nz, node))**2)/ & + vshear2(nz)=(( UVnode(1, nz-1, node) - UVnode(1, nz, node))**2 + & + ( UVnode(2, nz-1, node) - UVnode(2, nz, node))**2)/ & ((Z_3d_n(nz-1,node)-Z_3d_n(nz,node))**2) end do @@ -313,14 +349,77 @@ subroutine calc_cvmix_tke(mesh) !___________________________________________________________________ ! dz_trr distance between tracer points, surface and bottom dz_trr is half ! the layerthickness ... - !!PS dz_trr = 0.0_WP - !!PS dz_trr(2:nln) = abs(Z_3d_n(1:nln-1,node)-Z_3d_n(2:nln,node)) - !!PS dz_trr(1) = hnode(1,node)/2.0_WP - !!PS dz_trr(nln+1) = hnode(nln,node)/2.0_WP - dz_trr = 0.0_WP - dz_trr(nun+1:nln) = abs(Z_3d_n(nun:nln-1,node)-Z_3d_n(nun+1:nln,node)) - dz_trr(nun) = hnode(nun,node)/2.0_WP - dz_trr(nln+1) = hnode(nln,node)/2.0_WP + dz_trr = 0.0_WP + dz_trr(nun+1:nln) = abs(Z_3d_n(nun:nln-1,node)-Z_3d_n(nun+1:nln,node)) + dz_trr(nun) = hnode(nun,node)/2.0_WP + dz_trr(nln+1) = hnode(nln,node)/2.0_WP + + !___________________________________________________________________ + ! calculate Langmuir cell additional term after Axell (2002) + ! --> adapted from ICON and Oliver Gutjahr + if (tke_dolangmuir) then + !_______________________________________________________________ + ! calculate Stoke's drift + ! Approximation if there is no information about the wave field + ! As done in Nemo + ! FIXME: do we need to divide tau by rho? + + ! Option used in NEMO model (https://www.nemo-ocean.eu/wp-content/ + ! uploads/NEMO_book.pdf, p.197) see also Breivik et al. (2015) + ! They assume rhoair=1.2 kg/m3 and cd=1.5e-03: + ! u_stokes = 0.016/(1.2 * 1.5e-03)^0.5 * |tau|^0.5; although they + ! seem to use rhoair=1.2 kg/m3 + ! langmuir_ustoke(node) = 0.377_wp * SQRT(tau_abs) ! [tau]=N2/m2 + ! langmuir_ustoke(node) = 0.016_wp/SQRT(1.2_wp * 1.5e-03_wp)*SQRT(tau_abs) + ! 
[tau]=N2/m2, rhoair=1.2, cd=1.5*10e-03 + langmuir_ustoke(node) = 0.016_WP/sqrt(1.2_WP * 1.5e-03_WP)*sqrt(tke_forc2d_normstress(node)*density_0) + + ! --> This is done in Coulevard et al (2020, doi:10.5194/gmd-13-3067-2020), see Fig.2 + ! langmuir_ustoke(node) = 0.377_wp * SQRT(forc_tke_surf_2D(jc,blockNo)) + ! --> other option from Li and Garrett (1993) + ! langmuir_ustoke(node) = 0.016_wp * fu10(jc,blockNo) + ! --> or original version from Axell (2002) + ! LLC = 0.12_wp*(u10**2/g) + ! langmuir_ustoke(node) = 0.0016*u10*EXP(depth/LLC) + + !_______________________________________________________________ + ! find depth of langmuir cell (hlc). hlc is the depth to which a water + ! parcel with kinetic energy 0.5*u_stokes**2 can reach on its own by + ! converting its kinetic energy to potential energy. + langmuir_hlc(node) = 0.0_wp + do nz=nun+1,nln + !!PS k_hlc = nz + aux = sum(-bvfreq2(2:nz+1)*zbar_3d_n(2:nz+1,node) ) + if(aux > 0.5_wp*langmuir_ustoke(node)**2.0_wp) then + !!PS k_hlc = nz + langmuir_hlc(node) = -zbar_3d_n(nz,node) + exit + end if + end do + + !_______________________________________________________________ + ! calculate langmuir cell velocity scale (wlc) + ! Note: Couvelard et al (2020) set clc=0.3 instead of default 0.15 from + ! Axell (2002); results in deeper MLDs and better spatial MLD pattern. + langmuir_wlc(:,node) = 0.0_wp + do nz=nun+1,nln + if(-zbar_3d_n(nz,node) <= langmuir_hlc(node)) then + langmuir_wlc(nz,node) = tke_clangmuir * langmuir_ustoke(node) * & + sin(-pi*zbar_3d_n(nz,node)/langmuir_hlc(node)) + !!PS else + !!PS langmuir_wlc(nz,node) = 0.0_wp + endif + end do + + !_______________________________________________________________ + ! calculate langmuir turbulence term (tke_plc) + if (langmuir_hlc(node) > 0.0_wp) then + tke_langmuir(:,node) = langmuir_wlc(:,node)**3.0_wp / langmuir_hlc(node) + else + tke_langmuir(:,node) = 0.0_wp + end if + + end if !___________________________________________________________________ ! 
main cvmix call to calculate tke @@ -330,44 +429,46 @@ subroutine calc_cvmix_tke(mesh) call cvmix_coeffs_tke(& ! parameter - dzw = hnode(:,node), & ! distance between layer interface --> hnode - dzt = dz_trr(:), & ! distnace between tracer points - nlev = nln, & + dzw = hnode(nun:nln,node), & ! distance between layer interface --> hnode + dzt = dz_trr(nun:nln+1), & ! distnace between tracer points +! nlev = nln, & + nlev = nln-nun+1, & max_nlev = nl-1, & dtime = dt, & rho_ref = density_0, & grav = g, & ! essentials - tke_new = tke(:,node), & ! out--> turbulent kinetic energy - KappaM_out = tke_Av(:,node), & ! out - KappaH_out = tke_Kv(:,node), & ! out - tke_old = tke_old(:), & ! in --> turbulent kinetic energy previous time step - old_KappaM = tke_Av_old(:), & ! in - old_KappaH = tke_Kv_old(:), & ! in - Ssqr = vshear2(:), & ! in --> square vert. vel. shear - Nsqr = bvfreq2(:), & ! in --> square brunt Väisälä freq - alpha_c = tke_in3d_iwealphac(:,node), & ! in for IDEMIX Ri - E_iw = tke_in3d_iwe(:,node), & ! in for IDEMIX Ri + tke_new = tke( nun:nln+1,node), & ! out--> turbulent kinetic energy + KappaM_out = tke_Av( nun:nln+1,node), & ! out + KappaH_out = tke_Kv( nun:nln+1,node), & ! out + tke_old = tke_old( nun:nln+1), & ! in --> turbulent kinetic energy previous time step + old_KappaM = tke_Av_old(nun:nln+1), & ! in + old_KappaH = tke_Kv_old(nun:nln+1), & ! in + Ssqr = vshear2( nun:nln+1), & ! in --> square vert. vel. shear + Nsqr = bvfreq2( nun:nln+1), & ! in --> square brunt Väisälä freq + alpha_c = tke_in3d_iwealphac(nun:nln+1,node), & ! in for IDEMIX Ri + E_iw = tke_in3d_iwe(nun:nln+1,node), & ! in for IDEMIX Ri ! forcing - forc_tke_surf= tke_forc2d_normstress(node), & ! in --> wind stress - forc_rho_surf= tke_forc2d_rhosurf(node), & ! in - bottom_fric = tke_forc2d_botfrict(node), & ! in - iw_diss = tke_in3d_iwdis(:,node), & ! in + forc_tke_surf= tke_forc2d_normstress( node), & ! in --> wind stress + forc_rho_surf= tke_forc2d_rhosurf( node), & ! 
in + bottom_fric = tke_forc2d_botfrict( node), & ! in + iw_diss = tke_in3d_iwdis(nun:nln+1,node), & ! in ! diagnostics - tke_Tbpr = tke_Tbpr(:,node), & ! buoyancy production - tke_Tspr = tke_Tspr(:,node), & ! shear production - tke_Tdif = tke_Tdif(:,node), & ! vertical diffusion d/dz(k d/dz)TKE - tke_Tdis = tke_Tdis(:,node), & ! dissipation - tke_Twin = tke_Twin(:,node), & ! wind forcing - tke_Tiwf = tke_Tiwf(:,node), & ! internal wave forcing when idemix is used - tke_Tbck = tke_Tbck(:,node), & ! background forcing only active if IDEMIX is not active, forcing that results from resetting TKE to minimum background TKE value - tke_Ttot = tke_Ttot(:,node), & ! sum of all terms - tke_Lmix = tke_Lmix(:,node), & ! mixing length scale of the TKE scheme - tke_Pr = tke_Pr(:,node), & ! Prantl number + tke_plc = tke_langmuir(nun:nln+1,node), & ! in + tke_Tbpr = tke_Tbpr(nun:nln+1,node), & ! buoyancy production + tke_Tspr = tke_Tspr(nun:nln+1,node), & ! shear production + tke_Tdif = tke_Tdif(nun:nln+1,node), & ! vertical diffusion d/dz(k d/dz)TKE + tke_Tdis = tke_Tdis(nun:nln+1,node), & ! dissipation + tke_Twin = tke_Twin(nun:nln+1,node), & ! wind forcing + tke_Tiwf = tke_Tiwf(nun:nln+1,node), & ! internal wave forcing when idemix is used + tke_Tbck = tke_Tbck(nun:nln+1,node), & ! background forcing only active if IDEMIX is not active, forcing that results from resetting TKE to minimum background TKE value + tke_Ttot = tke_Ttot(nun:nln+1,node), & ! sum of all terms + tke_Lmix = tke_Lmix(nun:nln+1,node), & ! mixing length scale of the TKE scheme + tke_Pr = tke_Pr( nun:nln+1,node), & ! Prantl number ! debugging - cvmix_int_1 = cvmix_dummy_1(:,node), & ! - cvmix_int_2 = cvmix_dummy_2(:,node), & ! - cvmix_int_3 = cvmix_dummy_3(:,node), & ! + cvmix_int_1 = cvmix_dummy_1(nun:nln+1,node), & ! + cvmix_int_2 = cvmix_dummy_2(nun:nln+1,node), & ! + cvmix_int_3 = cvmix_dummy_3(nun:nln+1,node), & ! 
i = 1, & j = 1, & tstep_count = tstep_count & @@ -375,25 +476,22 @@ subroutine calc_cvmix_tke(mesh) tke_Av(nln+1,node)=0.0_WP tke_Kv(nln+1,node)=0.0_WP - !!PS tke_Av(1,node)=0.0_WP - !!PS tke_Kv(1,node)=0.0_WP - tke_Av(nun,node)=0.0_WP - tke_Kv(nun,node)=0.0_WP + tke_Av(nun ,node)=0.0_WP + tke_Kv(nun ,node)=0.0_WP end do !--> do node = 1,node_size !_______________________________________________________________________ ! write out diffusivity - call exchange_nod(tke_Kv) + call exchange_nod(tke_Kv, partit) Kv = tke_Kv !_______________________________________________________________________ ! write out viscosity -->interpolate therefor from nodes to elements - call exchange_nod(tke_Av) !Warning: don't forget to communicate before averaging on elements!!! + call exchange_nod(tke_Av, partit) !Warning: don't forget to communicate before averaging on elements!!! Av = 0.0_WP do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) - !!PS do nz=2,nlevels(elem)-1 do nz=ulevels(elem)+1,nlevels(elem)-1 Av(nz,elem) = sum(tke_Av(nz,elnodes))/3.0_WP ! 
(elementwise) end do diff --git a/src/gen_modules_diag.F90 b/src/gen_modules_diag.F90 index 7dc0540b8..d9f924a0c 100755 --- a/src/gen_modules_diag.F90 +++ b/src/gen_modules_diag.F90 @@ -2,24 +2,29 @@ module diagnostics use g_config use mod_mesh - use g_parsup + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_DYN use g_clock use g_comm_auto use o_ARRAYS use g_forcing_arrays - use i_ARRAYS use o_mixing_KPP_mod use g_rotate_grid use g_support implicit none private -!!PS - public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, & - compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & - u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, v_surf, u_bott, v_bott, & - std_dens_min, std_dens_max, std_dens_N, std_dens, std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, dens_flux, & - compute_diag_dvd_2ndmoment_klingbeil_etal_2014, compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd + public :: ldiag_solver, lcurt_stress_surf, ldiag_energy, ldiag_dMOC, ldiag_DVD, & + ldiag_forc, ldiag_salt3D, ldiag_curl_vel3, diag_list, ldiag_vorticity, & + compute_diagnostics, rhs_diag, curl_stress_surf, curl_vel3, wrhof, rhof, & + u_x_u, u_x_v, v_x_v, v_x_w, u_x_w, dudx, dudy, dvdx, dvdy, dudz, dvdz, & + utau_surf, utau_bott, av_dudz_sq, av_dudz, av_dvdz, stress_bott, u_surf, & + v_surf, u_bott, v_bott, std_dens_min, std_dens_max, std_dens_N, std_dens, & + std_dens_UVDZ, std_dens_DIV, std_dens_Z, std_dens_dVdT, std_dens_flux, & + dens_flux_e, vorticity, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & + compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd ! Arrays used for diagnostics, some shall be accessible to the I/O ! 1. solver diagnostics: A*x=rhs? ! 
A=ssh_stiff, x=d_eta, rhs=ssh_rhs; rhs_diag=A*x; @@ -31,6 +36,7 @@ module diagnostics real(kind=WP), save, allocatable, target :: dudx(:,:), dudy(:,:), dvdx(:,:), dvdy(:,:), dudz(:,:), dvdz(:,:), av_dudz(:,:), av_dvdz(:,:), av_dudz_sq(:,:) real(kind=WP), save, allocatable, target :: utau_surf(:), utau_bott(:) real(kind=WP), save, allocatable, target :: stress_bott(:,:), u_bott(:), v_bott(:), u_surf(:), v_surf(:) + real(kind=WP), save, allocatable, target :: vorticity(:,:) ! defining a set of standard density bins which will be used for computing densMOC ! integer, parameter :: std_dens_N = 100 @@ -50,7 +56,7 @@ module diagnostics real(kind=WP), save, target :: std_dd(std_dens_N-1) real(kind=WP), save, target :: std_dens_min=1030., std_dens_max=1040. real(kind=WP), save, allocatable, target :: std_dens_UVDZ(:,:,:), std_dens_flux(:,:,:), std_dens_dVdT(:,:), std_dens_DIV(:,:), std_dens_Z(:,:) - real(kind=WP), save, allocatable, target :: dens_flux(:) + real(kind=WP), save, allocatable, target :: dens_flux_e(:) logical :: ldiag_solver =.false. logical :: lcurt_stress_surf=.false. @@ -67,20 +73,29 @@ module diagnostics logical :: ldiag_forc =.false. + logical :: ldiag_vorticity =.false. + namelist /diag_list/ ldiag_solver, lcurt_stress_surf, ldiag_curl_vel3, ldiag_energy, & - ldiag_dMOC, ldiag_DVD, ldiag_salt3D, ldiag_forc + ldiag_dMOC, ldiag_DVD, ldiag_salt3D, ldiag_forc, ldiag_vorticity contains ! ============================================================== !rhs_diag=ssh_rhs? -subroutine diag_solver(mode, mesh) +subroutine diag_solver(mode, dynamics, partit, mesh) implicit none - integer, intent(in) :: mode - integer :: n, is, ie - logical, save :: firstcall=.true. - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh) , intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + integer, intent(in) :: mode + integer :: n, is, ie + logical, save :: firstcall=.true. 
+ real(kind=WP), dimension(:) , pointer :: d_eta +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + d_eta =>dynamics%d_eta(:) !===================== if (firstcall) then !allocate the stuff at the first call @@ -97,15 +112,19 @@ subroutine diag_solver(mode, mesh) end subroutine diag_solver ! ============================================================== !curt(stress_surf) -subroutine diag_curl_stress_surf(mode, mesh) +subroutine diag_curl_stress_surf(mode, partit, mesh) implicit none - integer, intent(in) :: mode - logical, save :: firstcall=.true. - integer :: enodes(2), el(2), ed, n - real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer, intent(in) :: mode + logical, save :: firstcall=.true. + integer :: enodes(2), el(2), ed, n + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 !===================== -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (firstcall) then !allocate the stuff at the first call allocate(curl_stress_surf(myDim_nod2D+eDim_nod2D)) @@ -134,20 +153,27 @@ subroutine diag_curl_stress_surf(mode, mesh) end if END DO DO n=1, myDim_nod2D+eDim_nod2D - curl_stress_surf(n)=curl_stress_surf(n)/area(1,n) + !!PS curl_stress_surf(n)=curl_stress_surf(n)/area(1,n) + curl_stress_surf(n)=curl_stress_surf(n)/areasvol(ulevels_nod2D(n),n) END DO end subroutine diag_curl_stress_surf ! ============================================================== !3D curl(velocity) -subroutine diag_curl_vel3(mode, mesh) +subroutine diag_curl_vel3(mode, dynamics, partit, mesh) implicit none - integer, intent(in) :: mode - logical, save :: firstcall=.true. 
- integer :: enodes(2), el(2), ed, n, nz, nl1, nl2, nl12, nu1, nu2, nu12 - real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + integer, intent(in) :: mode + logical, save :: firstcall=.true. + integer :: enodes(2), el(2), ed, n, nz, nl1, nl2, nl12, nu1, nu2, nu12 + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 + real(kind=WP), dimension(:,:,:), pointer :: UV +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) !===================== if (firstcall) then !allocate the stuff at the first call @@ -209,23 +235,35 @@ subroutine diag_curl_vel3(mode, mesh) DO n=1, myDim_nod2D !!PS DO nz=1, nlevels_nod2D(n)-1 DO nz=ulevels_nod2D(n), nlevels_nod2D(n)-1 - curl_vel3(nz,n)=curl_vel3(nz,n)/area(nz,n) + curl_vel3(nz,n)=curl_vel3(nz,n)/areasvol(nz,n) END DO END DO end subroutine diag_curl_vel3 ! ============================================================== !energy budget -subroutine diag_energy(mode, mesh) +subroutine diag_energy(mode, dynamics, partit, mesh) implicit none - integer, intent(in) :: mode - type(t_mesh), intent(in) , target :: mesh - logical, save :: firstcall=.true. + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + integer, intent(in) :: mode + logical, save :: firstcall=.true. 
integer :: n, nz, k, i, elem, nzmax, nzmin, elnodes(3) integer :: iup, ilo real(kind=WP) :: ux, vx, uy, vy, tvol, rval(2) real(kind=WP) :: geo_grad_x(3), geo_grad_y(3), geo_u(3), geo_v(3) + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode + real(kind=WP), dimension(:,:), pointer :: Wvel + real(kind=WP) :: zbar_n(mesh%nl), Z_n(mesh%nl-1) + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + UVnode=> dynamics%uvnode(:,:,:) + Wvel => dynamics%w(:,:) -#include "associate_mesh.h" !===================== if (firstcall) then !allocate the stuff at the first call allocate(wrhof(nl, myDim_nod2D), rhof(nl, myDim_nod2D)) @@ -265,9 +303,9 @@ subroutine diag_energy(mode, mesh) if (mode==0) return end if - u_x_u=Unode(1,1:nl-1,1:myDim_nod2D)*Unode(1,1:nl-1,1:myDim_nod2D) - u_x_v=Unode(1,1:nl-1,1:myDim_nod2D)*Unode(2,1:nl-1,1:myDim_nod2D) - v_x_v=Unode(2,1:nl-1,1:myDim_nod2D)*Unode(2,1:nl-1,1:myDim_nod2D) + u_x_u=UVnode(1,1:nl-1,1:myDim_nod2D)*UVnode(1,1:nl-1,1:myDim_nod2D) + u_x_v=UVnode(1,1:nl-1,1:myDim_nod2D)*UVnode(2,1:nl-1,1:myDim_nod2D) + v_x_v=UVnode(2,1:nl-1,1:myDim_nod2D)*UVnode(2,1:nl-1,1:myDim_nod2D) ! 
this loop might be very expensive DO n=1, myDim_elem2D nzmax = nlevels(n) @@ -370,10 +408,10 @@ subroutine diag_energy(mode, mesh) if (nlevels(elem)-1 < nz) cycle elnodes=elem2D_nodes(:, elem) tvol=tvol+elem_area(elem) - ux=ux+sum(gradient_sca(1:3,elem)*Unode(1,nz,elnodes))*elem_area(elem) !accumulate tensor of velocity derivatives - vx=vx+sum(gradient_sca(1:3,elem)*Unode(2,nz,elnodes))*elem_area(elem) - uy=uy+sum(gradient_sca(4:6,elem)*Unode(1,nz,elnodes))*elem_area(elem) - vy=vy+sum(gradient_sca(4:6,elem)*Unode(2,nz,elnodes))*elem_area(elem) + ux=ux+sum(gradient_sca(1:3,elem)*UVnode(1,nz,elnodes))*elem_area(elem) !accumulate tensor of velocity derivatives + vx=vx+sum(gradient_sca(1:3,elem)*UVnode(2,nz,elnodes))*elem_area(elem) + uy=uy+sum(gradient_sca(4:6,elem)*UVnode(1,nz,elnodes))*elem_area(elem) + vy=vy+sum(gradient_sca(4:6,elem)*UVnode(2,nz,elnodes))*elem_area(elem) END DO dudx(nz,n)=ux/tvol!/area(nz, n)/3. dvdx(nz,n)=vx/tvol @@ -383,21 +421,32 @@ subroutine diag_energy(mode, mesh) END DO end subroutine diag_energy ! ============================================================== -subroutine diag_densMOC(mode, mesh) +subroutine diag_densMOC(mode, dynamics, tracers, partit, mesh) implicit none - integer, intent(in) :: mode - type(t_mesh), intent(in) , target :: mesh - integer :: nz, snz, elem, nzmax, nzmin, elnodes(3), is, ie, pos - integer :: e, edge, enodes(2), eelems(2) - real(kind=WP) :: div, deltaX, deltaY, locz - integer :: jj - real(kind=WP), save :: dd - real(kind=WP) :: uvdz_el(2), rhoz_el, vol_el, dz, weight, dmin, dmax, ddiff, test, test1, test2, test3 - real(kind=WP), save, allocatable :: dens(:), aux(:), el_depth(:) - real(kind=WP), save, allocatable :: std_dens_w(:,:), std_dens_VOL1(:,:), std_dens_VOL2(:,:) - logical, save :: firstcall_s=.true., firstcall_e=.true. 
- -#include "associate_mesh.h" + integer, intent(in) :: mode + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + integer :: nz, snz, elem, nzmax, nzmin, elnodes(3), is, ie, pos + integer :: e, edge, enodes(2), eelems(2) + real(kind=WP) :: div, deltaX, deltaY, locz + integer :: jj + real(kind=WP), save :: dd + real(kind=WP) :: uvdz_el(2), rhoz_el, vol_el, dz, weight, dmin, dmax, ddiff, test, test1, test2, test3 + real(kind=WP), save, allocatable :: dens(:), aux(:), el_depth(:) + real(kind=WP), save, allocatable :: std_dens_w(:,:), std_dens_VOL1(:,:), std_dens_VOL2(:,:) + logical, save :: firstcall_s=.true., firstcall_e=.true. + real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + fer_UV => dynamics%fer_uv(:,:,:) if (firstcall_s) then !allocate the stuff at the first call allocate(std_dens_UVDZ(2,std_dens_N, myDim_elem2D)) @@ -408,7 +457,7 @@ subroutine diag_densMOC(mode, mesh) allocate(std_dens_VOL2( std_dens_N, myDim_elem2D)) allocate(std_dens_flux(3,std_dens_N, myDim_elem2D)) allocate(std_dens_Z ( std_dens_N, myDim_elem2D)) - allocate(dens_flux(elem2D)) + allocate(dens_flux_e(elem2D)) allocate(aux (nl-1)) allocate(dens (nl)) allocate(el_depth(nl)) @@ -438,7 +487,7 @@ subroutine diag_densMOC(mode, mesh) std_dens_UVDZ=0. std_dens_w =0.! temporat thing for wiighting (ageraging) mean fields within a bin std_dens_flux=0. - dens_flux =0. + dens_flux_e =0. std_dens_VOL2=0. std_dens_DIV =0. std_dens_Z =0. @@ -450,11 +499,11 @@ subroutine diag_densMOC(mode, mesh) ! 
density flux on elements (although not related to binning it might be usefull for diagnostic and to verify the consistency) do jj=1,3 - dens_flux(elem)= dens_flux(elem) + (sw_alpha(ulevels_nod2D(elnodes(jj)),elnodes(jj)) * heat_flux_in(elnodes(jj)) / vcpw + & + dens_flux_e(elem)=dens_flux_e(elem) + (sw_alpha(ulevels_nod2D(elnodes(jj)),elnodes(jj)) * heat_flux_in(elnodes(jj)) / vcpw + & sw_beta(ulevels_nod2D(elnodes(jj)),elnodes(jj)) * (relax_salt (elnodes(jj)) + water_flux(elnodes(jj)) * & - tr_arr(ulevels_nod2D(elnodes(jj)),elnodes(jj),2))) + salt(ulevels_nod2D(elnodes(jj)),elnodes(jj)))) end do - dens_flux(elem) = dens_flux(elem)/3.0_WP + dens_flux_e(elem) =dens_flux_e(elem)/3.0_WP ! density_dmoc is the sigma_2 density given at nodes. it is computed in oce_ale_pressure_bv do nz=nzmin, nzmax-1 aux(nz)=sum(density_dmoc(nz, elnodes))/3.-1000. @@ -478,7 +527,7 @@ subroutine diag_densMOC(mode, mesh) dd = 0.0_WP do jj=1,3 - dd = dd + (sw_beta (1,elnodes(jj)) * water_flux(elnodes(jj)) * tr_arr(ulevels_nod2D(elnodes(jj)), elnodes(jj), 2)) + dd = dd + (sw_beta (1,elnodes(jj)) * water_flux(elnodes(jj)) * salt(ulevels_nod2D(elnodes(jj)), elnodes(jj))) end do std_dens_flux(3, is,elem)=std_dens_flux(3, is,elem)+elem_area(elem)*dd/3. @@ -629,32 +678,141 @@ subroutine diag_densMOC(mode, mesh) std_dens_VOL1=std_dens_VOL2 firstcall_e=.false. end subroutine diag_densMOC -! ============================================================== +! +! +!_______________________________________________________________________________ +subroutine relative_vorticity(mode, dynamics, partit, mesh) + IMPLICIT NONE + integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 + integer, intent(in) :: mode + logical, save :: firstcall=.true. 
+ type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + + !___________________________________________________________________________ + if (firstcall) then !allocate the stuff at the first call + allocate(vorticity(nl-1, myDim_nod2D+eDim_nod2D)) + firstcall=.false. + if (mode==0) return + end if + !!PS DO n=1,myDim_nod2D + !!PS nl1 = nlevels_nod2D(n)-1 + !!PS ul1 = ulevels_nod2D(n) + !!PS vorticity(ul1:nl1,n)=0.0_WP + !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 + !!PS !!PS vorticity(nz,n)=0.0_WP + !!PS !!PS END DO + !!PS END DO + vorticity = 0.0_WP + DO edge=1,myDim_edge2D + !! edge=myList_edge2D(m) + enodes=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + ul1=ulevels(el(1)) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + nl2=0 + ul2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + nl2=nlevels(el(2))-1 + ul2=ulevels(el(2)) + end if + nl12 = min(nl1,nl2) + ul12 = max(ul1,ul2) + + DO nz=ul1,ul12-1 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + if (ul2>0) then + DO nz=ul2,ul12-1 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + endif + !!PS DO nz=1,min(nl1,nl2) + DO nz=ul12,nl12 + c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & + deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl1 + DO nz=nl12+1,nl1 + 
c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + !!PS DO nz=min(nl1,nl2)+1,nl2 + DO nz=nl12+1,nl2 + c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) + vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 + vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 + END DO + END DO + + ! vorticity = vorticity*area at this stage + ! It is correct only on myDim nodes + DO n=1,myDim_nod2D + !! n=myList_nod2D(m) + ul1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !!PS DO nz=1,nlevels_nod2D(n)-1 + DO nz=ul1,nl1-1 + vorticity(nz,n)=vorticity(nz,n)/areasvol(nz,n) + END DO + END DO + + call exchange_nod(vorticity, partit) + +! Now it the relative vorticity known on neighbors too +end subroutine relative_vorticity + + -subroutine compute_diagnostics(mode, mesh) +! ============================================================== +subroutine compute_diagnostics(mode, dynamics, tracers, partit, mesh) implicit none - integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) - real(kind=WP) :: val - type(t_mesh), intent(in) , target :: mesh - !1. solver diagnostic - if (ldiag_solver) call diag_solver(mode, mesh) + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics + integer, intent(in) :: mode !constructor mode (0=only allocation; any other=do diagnostic) + real(kind=WP) :: val !1. solver diagnostic + if (ldiag_solver) call diag_solver(mode, dynamics, partit, mesh) !2. compute curl(stress_surf) - if (lcurt_stress_surf) call diag_curl_stress_surf(mode, mesh) + if (lcurt_stress_surf) call diag_curl_stress_surf(mode, partit, mesh) !3. compute curl(velocity) - if (ldiag_curl_vel3) call diag_curl_vel3(mode, mesh) + if (ldiag_curl_vel3) call diag_curl_vel3(mode, dynamics, partit, mesh) !4. 
compute energy budget - if (ldiag_energy) call diag_energy(mode, mesh) + if (ldiag_energy) call diag_energy(mode, dynamics, partit, mesh) !5. print integrated temperature if (ldiag_salt3d) then if (mod(mstep,logfile_outfreq)==0) then - call integrate_nod(tr_arr(:,:,2), val, mesh) - if (mype==0) then + call integrate_nod(tracers%data(2)%values(:,:), val, partit, mesh) + if (partit%mype==0) then write(*,*) 'total integral of salinity at timestep :', mstep, val end if end if end if !6. MOC in density coordinate - if (ldiag_dMOC) call diag_densMOC(mode, mesh) + if (ldiag_dMOC) call diag_densMOC(mode, dynamics, tracers, partit, mesh) + + ! compute relative vorticity + if (ldiag_vorticity) call relative_vorticity(mode, dynamics, partit, mesh) end subroutine compute_diagnostics @@ -669,17 +827,21 @@ end subroutine compute_diagnostics ! in a coastal model application ... ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... 
-subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, mesh) +subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, tracers, partit, mesh) use o_arrays - use g_PARSUP use oce_adv_tra_driver_interfaces implicit none - type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: tr_num + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer, intent(in) :: tr_num integer :: node, nz, nzmin, nzmax - real(kind=WP) :: tr_sqr(mesh%nl-1,myDim_nod2D+eDim_nod2D), trAB_sqr(mesh%nl-1,myDim_nod2D+eDim_nod2D) + real(kind=WP) :: tr_sqr(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D), trAB_sqr(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! square up fields for actual tracers and Adams Bashfort tracer @@ -691,8 +853,8 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, mesh) nzmax = nlevels_nod2D(node)-1 nzmin = ulevels_nod2D(node) do nz = nzmin, nzmax - tr_sqr(nz,node) = tr_arr(nz,node,tr_num)**2 - trAB_sqr(nz,node) = tr_arr_old(nz,node,tr_num)**2 + tr_sqr(nz,node) = tracers%data(tr_num)%values (nz,node)**2 + trAB_sqr(nz,node) = tracers%data(tr_num)%valuesAB(nz,node)**2 end do end do @@ -700,9 +862,10 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, mesh) ! calculate horizintal and vertical advection for squared tracer (2nd moments) ! see Burchard and Rennau, 2008, Comparative quantification of physically and ! numerically induced mixing in ocean models ... 
- del_ttf_advhoriz = 0.0_WP - del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(tr_sqr, trAB_sqr, UV, wvel, wvel_i, wvel_e, 1, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) + tracers%work%del_ttf_advhoriz = 0.0_WP + tracers%work%del_ttf_advvert = 0.0_WP +! maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? +! call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tr_sqr, trAB_sqr, 1, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, partit, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D @@ -721,8 +884,8 @@ subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num, mesh) ! --> split it up in DVD contribution from horizontal and vertical ! advection since for the horizontal advection Adams Bashfort tracer ! are used and for the vertical the normal tracer values. - tr_dvd_horiz(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*trAB_sqr(nz,node) - del_ttf_advhoriz(nz,node)/hnode_new(nz,node) - tr_dvd_vert(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*tr_sqr( nz,node) - del_ttf_advvert( nz,node)/hnode_new(nz,node) + tracers%work%tr_dvd_horiz(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*trAB_sqr(nz,node) - tracers%work%del_ttf_advhoriz(nz,node)/hnode_new(nz,node) + tracers%work%tr_dvd_vert (nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*tr_sqr( nz,node) - tracers%work%del_ttf_advvert( nz,node)/hnode_new(nz,node) end do end do end subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008 @@ -735,23 +898,28 @@ end subroutine compute_diag_dvd_2ndmoment_burchard_etal_2008 ! see: ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... 
-subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, mesh) +subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, tracers, partit, mesh) use o_arrays - use g_PARSUP use oce_adv_tra_driver_interfaces implicit none - integer, intent(in) :: tr_num - integer :: node, nz, nzmin, nzmax - type(t_mesh), intent(in), target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer :: node, nz, nzmin, nzmax + integer, intent(in) :: tr_num + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! calculate horizintal and vertical advection for squared tracer (2nd moments) ! see Burchard and Rennau, 2008, Comparative quantification of physically and ! numerically induced mixing in ocean models ... - del_ttf_advhoriz = 0.0_WP - del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(tr_arr(:,:,tr_num), tr_arr_old(:,:,tr_num), UV, wvel, wvel_i, wvel_e, 2, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) + tracers%work%del_ttf_advhoriz = 0.0_WP + tracers%work%del_ttf_advvert = 0.0_WP +! maybe just to introduce an another tharer of t_tracer type with **do_Xmoment? +! call do_oce_adv_tra(dt, UV, wvel, wvel_i, wvel_e, tracers%data(tr_num)%values, tracers%data(tr_num)%valuesAB(:,:), 2, tracers%work%del_ttf_advhoriz, tracers%work%del_ttf_advvert, tra_adv_ph, tra_adv_pv, partit, mesh) !___________________________________________________________________________ ! add target second moment to DVD do node = 1,mydim_nod2D @@ -774,10 +942,10 @@ subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, mesh) ! --> split it up in DVD contribution from horizontal and vertical ! advection since for the horizontal advection Adams Bashfort tracer ! 
are used and for the vertical the normal tracer values. - tr_dvd_horiz(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*(tr_arr_old(nz,node,tr_num)**2) & - - del_ttf_advhoriz(nz,node)/hnode_new(nz,node) - tr_dvd_vert(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*(tr_arr( nz,node,tr_num)**2) & - - del_ttf_advvert( nz,node)/hnode_new(nz,node) + tracers%work%tr_dvd_horiz(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*(tracers%data(tr_num)%valuesAB(nz,node)**2) & + - tracers%work%del_ttf_advhoriz(nz,node)/hnode_new(nz,node) + tracers%work%tr_dvd_vert(nz,node,tr_num) = hnode(nz,node)/hnode_new(nz,node)*(tracers%data(tr_num)%values (nz,node)**2) & + - tracers%work%del_ttf_advvert( nz,node)/hnode_new(nz,node) end do end do end subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014 @@ -792,17 +960,21 @@ end subroutine compute_diag_dvd_2ndmoment_klingbeil_etal_2014 ! in a coastal model application ... ! Klingbeil et al., 2014, Quantification of spurious dissipation and mixing – ! Discrete variance decay in a Finite-Volume framework ... -subroutine compute_diag_dvd(tr_num, mesh) +subroutine compute_diag_dvd(tr_num, tracers, partit, mesh) use g_config, only: dt - use o_arrays - use g_PARSUP - + use o_arrays implicit none - integer, intent(in) :: tr_num - integer :: node, nz, nzmin, nzmax - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer :: node, nz, nzmin, nzmax + integer, intent(in) :: tr_num + -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! add discret second moment to DVD do node = 1,mydim_nod2D @@ -818,17 +990,17 @@ subroutine compute_diag_dvd(tr_num, mesh) ! | ! v ! now add this part - ! 
--> tr_dvd_horiz contains already the expected target second moments + ! --> tracers%work%tr_dvd_horiz contains already the expected target second moments ! from subroutine compute_diag_dvd_2ndmoment - tr_dvd_horiz(nz,node,tr_num) = (tr_dvd_horiz(nz,node,tr_num) & - -( hnode(nz,node)/hnode_new(nz,node)*tr_arr_old(nz,node,tr_num) & - -del_ttf_advhoriz(nz,node)/hnode_new(nz,node) & - )**2 & + tracers%work%tr_dvd_horiz(nz,node,tr_num) = (tracers%work%tr_dvd_horiz(nz,node,tr_num) & + -( hnode(nz,node)/hnode_new(nz,node)*tracers%data(tr_num)%valuesAB(nz,node) & + -tracers%work%del_ttf_advhoriz(nz,node)/hnode_new(nz,node) & + )**2 & )/dt - tr_dvd_vert(nz,node,tr_num) = (tr_dvd_vert(nz,node,tr_num) & - -( hnode(nz,node)/hnode_new(nz,node)*tr_arr( nz,node,tr_num) & - -del_ttf_advvert( nz,node)/hnode_new(nz,node) & - )**2 & + tracers%work%tr_dvd_vert(nz,node,tr_num) = (tracers%work%tr_dvd_vert(nz,node,tr_num) & + -( hnode(nz,node)/hnode_new(nz,node)*tracers%data(tr_num)%values (nz,node) & + -tracers%work%del_ttf_advvert( nz,node)/hnode_new(nz,node) & + )**2 & )/dt end do end do diff --git a/src/gen_modules_forcing.F90 b/src/gen_modules_forcing.F90 index 2e2f67dca..d3f633ea7 100755 --- a/src/gen_modules_forcing.F90 +++ b/src/gen_modules_forcing.F90 @@ -59,13 +59,16 @@ module g_forcing_arrays real(kind=WP), allocatable, dimension(:) :: runoff, evaporation, ice_sublimation real(kind=WP), allocatable, dimension(:) :: cloudiness, press_air -#if defined (__oasis) +#if defined (__oasis) || defined (__ifsinterface) /* todo: use a single shared definition */ real(kind=WP), target, allocatable, dimension(:) :: sublimation, evap_no_ifrac +#endif +#if defined (__oasis) real(kind=WP), target, allocatable, dimension(:) :: tmp_sublimation, tmp_evap_no_ifrac !temporary flux fields real(kind=WP), target, allocatable, dimension(:) :: tmp_shortwave !(for flux correction) real(kind=WP), allocatable, dimension(:) :: atm_net_fluxes_north, atm_net_fluxes_south real(kind=WP), allocatable, 
dimension(:) :: oce_net_fluxes_north, oce_net_fluxes_south real(kind=WP), allocatable, dimension(:) :: flux_correction_north, flux_correction_south, flux_correction_total + real(kind=WP), allocatable, dimension(:) :: residualifwflx #endif real(kind=WP), allocatable, dimension(:) :: runoff_landice @@ -75,7 +78,8 @@ module g_forcing_arrays real(kind=WP), allocatable, dimension(:) :: chl real(kind=WP), allocatable, dimension(:,:) :: sw_3d - real(kind=WP), allocatable, dimension(:) :: thdgr, thdgrsn, flice +! real(kind=WP), allocatable, dimension(:) :: thdgr, thdgrsn + real(kind=WP), allocatable, dimension(:) :: flice real(kind=WP), allocatable, dimension(:) :: olat_heat, osen_heat, olwout real(kind=WP), allocatable, dimension(:) :: real_salt_flux !PS diff --git a/src/gen_modules_gpot.F90 b/src/gen_modules_gpot.F90 index 439197a7f..d62414902 100644 --- a/src/gen_modules_gpot.F90 +++ b/src/gen_modules_gpot.F90 @@ -23,9 +23,10 @@ MODULE mo_tidal USE o_PARAM USE o_ARRAYS, only : ssh_gp - USE mod_mesh + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP USE g_config, only : dt - USE g_PARSUP USE g_clock IMPLICIT NONE !Earth Tides ( maik thomas, emr pers. comm ) @@ -36,14 +37,14 @@ MODULE mo_tidal CONTAINS - SUBROUTINE foreph_ini(lyear,lmonth) + SUBROUTINE foreph_ini(lyear,lmonth, partit) ! Initialization of tidal module ! Determination of Julian Day of first time step ! Projection of mpiom grid on tidal module internal coordinates IMPLICIT NONE - - INTEGER,INTENT(IN)::lyear,lmonth + type(t_partit), intent(in) :: partit + INTEGER,INTENT(IN) :: lyear,lmonth INTEGER :: i, j, jcc, moph mmccdt = 0; jcc = 0; moph = 0 @@ -57,7 +58,7 @@ SUBROUTINE foreph_ini(lyear,lmonth) ! FIXME : replace eph by a some to code that directly calculates julian days and ! 
centuries as needed by siderial time and ephemerides - if (mype==0) WRITE(*,*)'tidal: phase relative to 2000 :' & + if (partit%mype==0) WRITE(*,*)'tidal: phase relative to 2000 :' & ,'year= ',lyear, 'month= ',lmonth, 'yearoff= ',jcc,' monoff= ',moph ,'mmccdt= ',mmccdt END SUBROUTINE foreph_ini @@ -107,18 +108,22 @@ SUBROUTINE eph(jul,mon,jahrph,moph) END SUBROUTINE eph - SUBROUTINE foreph(mesh) + SUBROUTINE foreph(partit, mesh) ! calculates the realtime gravitational potential of sun & moon ! output: ssh_gp (with Body Earth Tide effect) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit REAL(WP) :: dres(3,2),crim3,rkomp,erdrad,rekts,dekls REAL(WP) :: cris3,rektm,deklm,deklm2,dekls2,sidm,sidmq REAL(WP) :: rkosp,codm,codmq,sids,sidsq,cods,codsq,sidm2 REAL(WP) :: sids2,hamp,hasp INTEGER :: i,j -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" mmccdt = mmccdt + 1 diff --git a/src/gen_modules_partitioning.F90 b/src/gen_modules_partitioning.F90 index a914a9d51..dc727700c 100644 --- a/src/gen_modules_partitioning.F90 +++ b/src/gen_modules_partitioning.F90 @@ -1,103 +1,63 @@ -!========================================================== -module g_PARSUP -USE o_PARAM -! Variables to organize parallel work -implicit none -save - -#ifdef PETSC -#include "finclude/petsc.h" -#else - include 'mpif.h' -#endif - - integer :: MPI_COMM_FESOM - integer, parameter :: MAX_LAENDERECK=16 - integer, parameter :: MAX_NEIGHBOR_PARTITIONS=32 - type com_struct - integer :: rPEnum ! the number of PE I receive info from - integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: rPE ! their list - integer, dimension(MAX_NEIGHBOR_PARTITIONS+1) :: rptr ! allocatables to the list of nodes - integer, dimension(:), allocatable :: rlist ! the list of nodes - integer :: sPEnum ! 
send part - integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sPE - integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sptr - integer, dimension(:), allocatable :: slist - integer, dimension(:), allocatable :: req ! request for MPI_Wait - integer :: nreq ! number of requests for MPI_Wait - ! (to combine halo exchange of several fields) - end type com_struct - - type(com_struct) :: com_nod2D -!!$ type(com_struct) :: com_edge2D - type(com_struct), target :: com_elem2D - type(com_struct), target :: com_elem2D_full - - ! MPI Datatypes for interface exchange - - ! Edge fields (2D) - integer, allocatable :: s_mpitype_edge2D(:), r_mpitype_edge2D(:) - - ! Element fields (2D; 2D integer; 3D with nl-1 or nl levels, 1 - 4 values) - ! small halo and / or full halo - integer, allocatable, target :: s_mpitype_elem2D(:,:), r_mpitype_elem2D(:,:) - integer, allocatable :: s_mpitype_elem2D_full_i(:), r_mpitype_elem2D_full_i(:) - integer, allocatable, target :: s_mpitype_elem2D_full(:,:), r_mpitype_elem2D_full(:,:) - integer, allocatable, target :: s_mpitype_elem3D(:,:,:), r_mpitype_elem3D(:,:,:) - integer, allocatable, target :: s_mpitype_elem3D_full(:,:,:),r_mpitype_elem3D_full(:,:,:) - - ! Nodal fields (2D; 2D integer; 3D with nl-1 or nl levels, one, two, or three values) - integer, allocatable :: s_mpitype_nod2D(:), r_mpitype_nod2D(:) - integer, allocatable :: s_mpitype_nod2D_i(:), r_mpitype_nod2D_i(:) - integer, allocatable :: s_mpitype_nod3D(:,:,:), r_mpitype_nod3D(:,:,:) - - ! general MPI part - integer :: MPIERR - integer :: npes - integer :: mype - integer :: maxPEnum=100 - integer, allocatable, dimension(:) :: part - - ! Mesh partition - integer :: myDim_nod2D, eDim_nod2D - integer, allocatable, dimension(:) :: myList_nod2D - integer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D - integer, allocatable, dimension(:) :: myList_elem2D - integer :: myDim_edge2D, eDim_edge2D - integer, allocatable, dimension(:) :: myList_edge2D - - integer :: pe_status = 0 ! 
if /=0 then something is wrong - - integer, allocatable :: remPtr_nod2D(:), remList_nod2D(:) - integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:) - - logical :: elem_full_flag -!$OMP threadprivate(com_nod2D,com_elem2D,com_elem2D_full) -!$OMP threadprivate(mype) -!$OMP threadprivate(myDim_nod2D, eDim_nod2D, myList_nod2D) -!$OMP threadprivate(myDim_elem2D, eDim_elem2D, eXDim_elem2D, myList_elem2D) -!$OMP threadprivate(myDim_edge2D, eDim_edge2D, myList_edge2D) - - -contains -subroutine par_init ! initializes MPI +module mod_parsup + interface + subroutine par_ex(COMM, mype, abort) + USE MOD_PARTIT + implicit none + integer, intent(in) :: COMM + integer, intent(in) :: mype + integer, optional, intent(in) :: abort + end subroutine + end interface +end module mod_parsup + +module par_support_interfaces + interface + subroutine par_init(partit) + USE o_PARAM + USE MOD_PARTIT + USE MOD_PARSUP + implicit none + type(t_partit), intent(inout), target :: partit + end subroutine + + subroutine init_mpi_types(partit, mesh) + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + implicit none + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine init_gatherLists(partit) + USE MOD_PARTIT + USE MOD_PARSUP + implicit none + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module + +subroutine par_init(partit) ! 
initializes MPI + USE o_PARAM + USE MOD_PARTIT + USE MOD_PARSUP implicit none + type(t_partit), intent(inout), target :: partit + integer :: i + integer :: provided_mpi_thread_support_level + character(:), allocatable :: provided_mpi_thread_support_level_name - - integer :: i - integer provided_mpi_thread_support_level - character(:), allocatable :: provided_mpi_thread_support_level_name - -#ifndef __oasis - call MPI_Comm_Size(MPI_COMM_WORLD,npes,i) - call MPI_Comm_Rank(MPI_COMM_WORLD,mype,i) - MPI_COMM_FESOM=MPI_COMM_WORLD +#if defined __oasis || defined __ifsinterface + ! use comm from coupler or ifs #else - call MPI_Comm_Size(MPI_COMM_FESOM,npes,i) - call MPI_Comm_Rank(MPI_COMM_FESOM,mype,i) + partit%MPI_COMM_FESOM=MPI_COMM_WORLD ! use global comm if not coupled (e.g. no __oasis or __ifsinterface) #endif + call MPI_Comm_Size(partit%MPI_COMM_FESOM,partit%npes,i) + call MPI_Comm_Rank(partit%MPI_COMM_FESOM,partit%mype,i) + - if(mype==0) then + if(partit%mype==0) then call MPI_Query_thread(provided_mpi_thread_support_level, i) if(provided_mpi_thread_support_level == MPI_THREAD_SINGLE) then provided_mpi_thread_support_level_name = "MPI_THREAD_SINGLE" @@ -112,67 +72,83 @@ subroutine par_init ! initializes MPI end if write(*,*) 'MPI has been initialized, provided MPI thread support level: ', & provided_mpi_thread_support_level_name,provided_mpi_thread_support_level - write(*, *) 'Running on ', npes, ' PEs' + write(*, *) 'Running on ', partit%npes, ' PEs' +#if defined(_OPENMP) + write(*, *) 'This is MPI/OpenMP run, with ', OMP_GET_MAX_THREADS(), ' threads per PE' +#endif end if end subroutine par_init !================================================================= -subroutine par_ex(abort) ! finalizes MPI +subroutine par_ex(COMM, mype, abort) ! 
finalizes MPI +USE MOD_PARTIT #ifndef __oifs !For standalone and coupled ECHAM runs #if defined (__oasis) use mod_prism #endif implicit none - integer,optional :: abort + integer, intent(in) :: COMM + integer, intent(in) :: mype + integer, optional, intent(in) :: abort + integer :: error #ifndef __oasis if (present(abort)) then if (mype==0) write(*,*) 'Run finished unexpectedly!' - call MPI_ABORT( MPI_COMM_FESOM, 1 ) + call MPI_ABORT(COMM, 1 ) else - call MPI_Barrier(MPI_COMM_FESOM,MPIerr) - call MPI_Finalize(MPIerr) + call MPI_Barrier(COMM, error) + call MPI_Finalize(error) endif #else if (.not. present(abort)) then if (mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' - call MPI_Barrier(MPI_COMM_WORLD, MPIerr) + call MPI_Barrier(MPI_COMM_WORLD, error) end if - call prism_terminate_proto(MPIerr) + call prism_terminate_proto(error) if (mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' - call MPI_Barrier(MPI_COMM_WORLD, MPIerr) + call MPI_Barrier(MPI_COMM_WORLD, error) if (mype==0) print *, 'FESOM calls MPI_Finalize' - call MPI_Finalize(MPIerr) + call MPI_Finalize(error) #endif if (mype==0) print *, 'fesom should stop with exit status = 0' #endif #if defined (__oifs) -!OIFS coupling doesnt call prism_terminate_proto and uses MPI_COMM_FESOM +!OIFS coupling doesnt call prism_terminate_proto and uses COMM instead of MPI_COMM_WORLD implicit none - integer,optional :: abort + integer, intent(in) :: COMM + integer, intent(in) :: mype + integer, optional, intent(in) :: abort + integer :: error if (present(abort)) then - if (mype==0) write(*,*) 'Run finished unexpectedly!' - call MPI_ABORT( MPI_COMM_FESOM, 1 ) + if (mype==0) write(*,*) 'Run finished unexpectedly!' 
+ call MPI_ABORT(COMM, 1 ) else - call MPI_Barrier(MPI_COMM_FESOM,MPIerr) - call MPI_Finalize(MPIerr) + call MPI_Barrier(COMM, error) + call MPI_Finalize(error) endif #endif end subroutine par_ex !======================================================================= -subroutine set_par_support(mesh) +subroutine init_mpi_types(partit, mesh) use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh integer :: n, offset integer :: i, max_nb, nb, nini, nend, nl1, n_val integer, allocatable :: blocklen(:), displace(:) integer, allocatable :: blocklen_tmp(:), displace_tmp(:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! ! In the distributed memory version, most of the job is already done ! at the initialization phase and is taken into account in read_mesh @@ -184,30 +160,25 @@ subroutine set_par_support(mesh) !================================================ ! MPI REQUEST BUFFERS !================================================ - allocate(com_nod2D%req( 3*com_nod2D%rPEnum + 3*com_nod2D%sPEnum)) - allocate(com_elem2D%req( 3*com_elem2D%rPEnum + 3*com_elem2D%sPEnum)) - allocate(com_elem2D_full%req(3*com_elem2D_full%rPEnum + 3*com_elem2D_full%sPEnum)) - + if (.not. allocated(com_nod2D%req)) allocate(com_nod2D%req( 3*com_nod2D%rPEnum + 3*com_nod2D%sPEnum)) + if (.not. allocated(com_elem2D%req)) allocate(com_elem2D%req( 3*com_elem2D%rPEnum + 3*com_elem2D%sPEnum)) + if (.not. allocated(com_elem2D_full%req)) allocate(com_elem2D_full%req(3*com_elem2D_full%rPEnum + 3*com_elem2D_full%sPEnum)) !================================================ ! MPI DATATYPES !================================================ - ! Build MPI Data types for halo exchange: Elements - allocate(r_mpitype_elem2D(com_elem2D%rPEnum,4)) ! 
2D, small halo - allocate(s_mpitype_elem2D(com_elem2D%sPEnum,4)) - allocate(r_mpitype_elem2D_full_i(com_elem2D_full%rPEnum)) ! 2D, wide halo, integer - allocate(s_mpitype_elem2D_full_i(com_elem2D_full%sPEnum)) - - allocate(r_mpitype_elem2D_full(com_elem2D_full%rPEnum,4)) ! 2D, wide halo - allocate(s_mpitype_elem2D_full(com_elem2D_full%sPEnum,4)) - - allocate(r_mpitype_elem3D(com_elem2D%rPEnum, nl-1:nl,4)) ! 3D, small halo - allocate(s_mpitype_elem3D(com_elem2D%sPEnum, nl-1:nl,4)) - - allocate(r_mpitype_elem3D_full(com_elem2D_full%rPEnum, nl-1:nl,4)) ! 3D, wide halo - allocate(s_mpitype_elem3D_full(com_elem2D_full%sPEnum, nl-1:nl,4)) - - + allocate(partit%r_mpitype_elem2D(com_elem2D%rPEnum,4)) ! 2D, small halo + allocate(partit%s_mpitype_elem2D(com_elem2D%sPEnum,4)) + allocate(partit%r_mpitype_elem2D_full_i(com_elem2D_full%rPEnum)) ! 2D, wide halo, integer + allocate(partit%s_mpitype_elem2D_full_i(com_elem2D_full%sPEnum)) + allocate(partit%r_mpitype_elem2D_full(com_elem2D_full%rPEnum,4)) ! 2D, wide halo + allocate(partit%s_mpitype_elem2D_full(com_elem2D_full%sPEnum,4)) + allocate(partit%r_mpitype_elem3D(com_elem2D%rPEnum, nl-1:nl,4)) ! 3D, small halo + allocate(partit%s_mpitype_elem3D(com_elem2D%sPEnum, nl-1:nl,4)) + allocate(partit%r_mpitype_elem3D_full(com_elem2D_full%rPEnum, nl-1:nl,4)) ! 3D, wide halo + allocate(partit%s_mpitype_elem3D_full(com_elem2D_full%sPEnum, nl-1:nl,4)) +!after the allocation we just reassotiate ALL pointers again here +#include "associate_part_ass.h" ! Upper limit for the length of the local interface between the neighbor PEs max_nb = max( & maxval(com_elem2D%rptr(2:com_elem2D%rPEnum+1) - com_elem2D%rptr(1:com_elem2D%rPEnum)), & @@ -251,6 +222,12 @@ subroutine set_par_support(mesh) blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + ! r_mpitype_elem3D shape is e.g. 7,2,4 and its bounds 1:7,1:2,1:4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS + ! 
the second dimension of r_mpitype_elem3D is probably always 2 (from nl-1 to nl) + if(.not. (all(lbound(r_mpitype_elem3D) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D) .ge. [n,nl1,n_val])) ) then + print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D), __FILE__,__LINE__ + stop 1 + end if call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & r_mpitype_elem3D(n,nl1,n_val), MPIerr) @@ -331,6 +308,12 @@ subroutine set_par_support(mesh) blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + ! r_mpitype_elem3D shape is e.g. 7,2,4 and its bounds 1:7,1:2,1:4 but the args n,nl1,n_val are 1,47,1 and thus OUT OF BOUNDS + ! the second dimension of r_mpitype_elem3D is probably always 2 (from nl-1 to nl) + if(.not. (all(lbound(r_mpitype_elem3D_full) .le. [n,nl1,n_val]) .and. all(ubound(r_mpitype_elem3D_full) .ge. [n,nl1,n_val])) ) then + print *,"out of bounds error, lbound:",lbound(r_mpitype_elem3D_full), "indices:", n,nl1,n_val, "ubound:", ubound(r_mpitype_elem3D_full), __FILE__,__LINE__ + stop 1 + end if call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & r_mpitype_elem3D_full(n,nl1,n_val), MPIerr) @@ -384,13 +367,15 @@ subroutine set_par_support(mesh) ! Build MPI Data types for halo exchange: Nodes - allocate(r_mpitype_nod2D(com_nod2D%rPEnum)) ! 2D - allocate(s_mpitype_nod2D(com_nod2D%sPEnum)) - allocate(r_mpitype_nod2D_i(com_nod2D%rPEnum)) ! 2D integer - allocate(s_mpitype_nod2D_i(com_nod2D%sPEnum)) + allocate(partit%r_mpitype_nod2D(com_nod2D%rPEnum)) ! 2D + allocate(partit%s_mpitype_nod2D(com_nod2D%sPEnum)) + allocate(partit%r_mpitype_nod2D_i(com_nod2D%rPEnum)) ! 2D integer + allocate(partit%s_mpitype_nod2D_i(com_nod2D%sPEnum)) - allocate(r_mpitype_nod3D(com_nod2D%rPEnum,nl-1:nl,3)) ! 
3D with nl-1 or nl layers, 1-3 values - allocate(s_mpitype_nod3D(com_nod2D%sPEnum,nl-1:nl,3)) + allocate(partit%r_mpitype_nod3D(com_nod2D%rPEnum,nl-1:nl,3)) ! 3D with nl-1 or nl layers, 1-3 values + allocate(partit%s_mpitype_nod3D(com_nod2D%sPEnum,nl-1:nl,3)) +!after the allocation we just reassotiate ALL pointers again here +#include "associate_part_ass.h" ! Upper limit for the length of the local interface between the neighbor PEs max_nb = max(maxval(com_nod2D%rptr(2:com_nod2D%rPEnum+1) - com_nod2D%rptr(1:com_nod2D%rPEnum)), & @@ -481,30 +466,26 @@ subroutine set_par_support(mesh) deallocate(blocklen, displace) deallocate(blocklen_tmp, displace_tmp) - endif - - call init_gatherLists - if(mype==0) write(*,*) 'Communication arrays are set' -end subroutine set_par_support - - +end subroutine init_mpi_types !=================================================================== -subroutine init_gatherLists - - use o_MESH +subroutine init_gatherLists(partit) + USE MOD_PARTIT + USE MOD_PARSUP implicit none - - integer :: n2D, e2D, sum_loc_elem2D - integer :: n, estart, nstart - + type(t_partit), intent(inout), target :: partit + integer :: n2D, e2D, sum_loc_elem2D + integer :: n, estart, nstart +#include "associate_part_def.h" +#include "associate_part_ass.h" if (mype==0) then if (npes > 1) then - allocate(remPtr_nod2D(npes)) - allocate(remPtr_elem2D(npes)) - + allocate(partit%remPtr_nod2D(npes)) + allocate(partit%remPtr_elem2D(npes)) +!reassociate the pointers to the just allocated arrays +#include "associate_part_ass.h" remPtr_nod2D(1) = 1 remPtr_elem2D(1) = 1 @@ -516,12 +497,12 @@ subroutine init_gatherLists remPtr_elem2D(n+1) = remPtr_elem2D(n) + e2D enddo - - - allocate(remList_nod2D(remPtr_nod2D(npes))) ! this should be nod2D - myDim_nod2D - allocate(remList_elem2D(remPtr_elem2D(npes))) ! this is > elem2D, because the elements overlap. + allocate(partit%remList_nod2D(remPtr_nod2D(npes))) ! 
this should be nod2D - myDim_nod2D + allocate(partit%remList_elem2D(remPtr_elem2D(npes))) ! this is > elem2D, because the elements overlap. ! Consider optimization: avoid multiple communication ! of the same elem from different PEs. +!reassociate the pointers to the just allocated arrays +#include "associate_part_ass.h" do n=1, npes-1 nstart = remPtr_nod2D(n) @@ -543,8 +524,33 @@ subroutine init_gatherLists call MPI_SEND(myList_elem2D, myDim_elem2D, MPI_INTEGER, 0, 3, MPI_COMM_FESOM, MPIerr ) endif - +!$OMP MASTER +#if defined(_OPENMP) + allocate(partit%plock(myDim_elem2D+eDim_elem2D)) !allocate with maximum dimention (nELEM> nNODE) + do n=1, myDim_elem2D+eDim_elem2D +!experiments showd that OPENMP5 implementation of the lock (201811) is >10% more efficient +!make sure you use OPENMP v. 5.0 +#if _OPENMP >= 201811 + call omp_init_lock_with_hint(partit%plock(n),omp_sync_hint_speculative+omp_sync_hint_uncontended) +#else + call omp_init_lock(partit%plock(n)) +#endif + enddo +#endif +!$OMP END MASTER end subroutine init_gatherLists +!=================================================================== +subroutine status_check(partit) +USE MOD_PARTIT +USE MOD_PARSUP +implicit none +type(t_partit), intent(inout), target :: partit +integer :: res +res=0 +call MPI_Allreduce (partit%pe_status, res, 1, MPI_INTEGER, MPI_SUM, partit%MPI_COMM_FESOM, partit%MPIerr) +if (res /= 0 ) then + if (partit%mype==0) write(*,*) 'Something Broke. Flushing and stopping...' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) +endif +end subroutine status_check - -end module g_PARSUP diff --git a/src/gen_modules_read_NetCDF.F90 b/src/gen_modules_read_NetCDF.F90 index 919830b14..3213e808d 100755 --- a/src/gen_modules_read_NetCDF.F90 +++ b/src/gen_modules_read_NetCDF.F90 @@ -3,7 +3,7 @@ ! 
module g_read_other_NetCDF contains -subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, mesh) +subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, partit, mesh) ! Read 2D data and interpolate to the model grid. ! Currently used to read runoff and SSS. ! First, missing values are filled in on the raw regular grid; @@ -16,11 +16,13 @@ subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, mesh use g_config use o_param USE MOD_MESH - use g_parsup + USE MOD_PARTIT + USE MOD_PARSUP implicit none #include "netcdf.inc" - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: i, j, ii, jj, k, n, num, flag, cnt integer :: itime, latlen, lonlen integer :: status, ncid, varid @@ -30,13 +32,17 @@ subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, mesh real(real64), allocatable :: lon(:), lat(:) real(real64), allocatable :: ncdata(:,:), ncdata_temp(:,:) real(real64), allocatable :: temp_x(:), temp_y(:) - real(real64) :: model_2Darray(myDim_nod2d+eDim_nod2D) + real(real64) :: model_2Darray(partit%myDim_nod2d+partit%eDim_nod2D) character(*) :: vari character(*) :: file logical :: check_dummy integer :: ierror ! return error code -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + if (mype==0) then ! open file @@ -47,7 +53,7 @@ subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, mesh if (status.ne.nf_noerr)then print*,'ERROR: CANNOT READ runoff FILE CORRECTLY !!!!!' print*,'Error in opening netcdf file'//file - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif @@ -144,13 +150,13 @@ subroutine read_other_NetCDF(file, vari, itime, model_2Darray, check_dummy, mesh ! 
interpolation flag=0 call interp_2d_field(lonlen, latlen, lon, lat, ncdata, num, temp_x, temp_y, & - model_2Darray, flag) + model_2Darray, flag, partit) deallocate(temp_y, temp_x, ncdata_temp, ncdata, lon, lat) end subroutine read_other_NetCDF ! !------------------------------------------------------------------------------------ ! - subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, mesh) +subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, partit, mesh) ! Read WOA (NetCDF) surface T/S and interpolate to the model grid. ! Currently used for surface restoring in case of ocean-alone models ! Calling interp_2d_field_v2 to do interpolation, which also treats the dummy value. @@ -160,12 +166,13 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, mesh) use g_config use o_param USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use g_rotate_grid - use g_parsup implicit none - #include "netcdf.inc" - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: i, j, n, num integer :: itime, latlen, lonlen integer :: status, ncid, varid @@ -175,13 +182,16 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, mesh) real(real64), allocatable :: lon(:), lat(:) real(real64), allocatable :: ncdata(:,:) real(real64), allocatable :: temp_x(:), temp_y(:) - real(real64) :: model_2Darray(myDim_nod2d+eDim_nod2D) + real(real64) :: model_2Darray(partit%myDim_nod2d+partit%eDim_nod2D) character(15) :: vari character(300) :: file logical :: check_dummy integer :: ierror ! return error code -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (mype==0) then ! 
open file @@ -192,7 +202,7 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, mesh) if (status.ne.nf_noerr)then print*,'ERROR: CANNOT READ runoff FILE CORRECTLY !!!!!' print*,'Error in opening netcdf file'//file - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif @@ -265,36 +275,41 @@ subroutine read_surf_hydrography_NetCDF(file, vari, itime, model_2Darray, mesh) end do ! interpolation call interp_2d_field_v2(lonlen, latlen, lon, lat, ncdata, miss, & - num, temp_x, temp_y, model_2Darray) + num, temp_x, temp_y, model_2Darray, partit) deallocate(temp_y, temp_x, ncdata, lon, lat) end subroutine read_surf_hydrography_NetCDF ! !------------------------------------------------------------------------------------ ! -subroutine read_2ddata_on_grid_NetCDF(file, vari, itime, model_2Darray, mesh) +subroutine read_2ddata_on_grid_NetCDF(file, vari, itime, model_2Darray, partit, mesh) use, intrinsic :: ISO_FORTRAN_ENV use g_config use o_param USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use g_rotate_grid - use g_parsup implicit none #include "netcdf.inc" - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n, i integer :: itime integer :: status, ncid, varid integer :: istart(2), icount(2) real(real64) :: ncdata(mesh%nod2D) - real(real64), intent(out) :: model_2Darray(myDim_nod2D+eDim_nod2D) - character(*), intent(in) :: file + real(real64), intent(out) :: model_2Darray(partit%myDim_nod2D+partit%eDim_nod2D) + character(*), intent(in) :: file character(*), intent(in) :: vari integer :: ierror ! return error code -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (mype==0) then ! 
open file @@ -304,7 +319,7 @@ subroutine read_2ddata_on_grid_NetCDF(file, vari, itime, model_2Darray, mesh) if (status.ne.nf_noerr)then print*,'ERROR: CANNOT READ runoff FILE CORRECTLY !!!!!' print*,'Error in opening netcdf file'//file - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif diff --git a/src/gen_modules_rotate_grid.F90 b/src/gen_modules_rotate_grid.F90 index 1634ee48b..42f70ddc0 100755 --- a/src/gen_modules_rotate_grid.F90 +++ b/src/gen_modules_rotate_grid.F90 @@ -1,10 +1,13 @@ ! Routines needed to support displaced poles: ! The new pole position is set with -! alphaEuler, betaEuler and gammaEuler. The degfault values +! alphaEuler, betaEuler and gammaEuler. The default values ! alphaEuler=50. [degree] Euler angles, convention: ! betaEuler=15. [degree] first around z, then around new x, ! gammaEuler=-90. [degree] then around new z. ! +! A helpful animation may be found online here: +! https://en.wikipedia.org/wiki/Euler_angles +! ! The first two define the new pole position ! as phi_p=alphaEuler-90, theta_p=90-betaEuler. ! The third, gammaEuler, is in reality irrelevant and just @@ -30,7 +33,6 @@ subroutine set_mesh_transform_matrix ! angle A around z-axis, the second is by an angle B about the new ! x-axis, and the third is by an angle G about the new z-axis. use o_PARAM - use g_PARSUP, only : mype implicit none real(kind=WP) :: al, be, ga @@ -48,7 +50,6 @@ subroutine set_mesh_transform_matrix r2g_matrix(3,1)=sin(be)*sin(al) r2g_matrix(3,2)=-sin(be)*cos(al) r2g_matrix(3,3)=cos(be) - if(mype==0) write(*,*) 'rotation matrix for rotated model grids prepared' end subroutine set_mesh_transform_matrix ! !---------------------------------------------------------------- diff --git a/src/gen_support.F90 b/src/gen_support.F90 index aacbc7719..231a671f3 100644 --- a/src/gen_support.F90 +++ b/src/gen_support.F90 @@ -3,14 +3,15 @@ !2. 
computing surface integrals of the FESOM fields module g_support USE MOD_MESH - use g_parsup + USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto use o_ARRAYS use g_config, only: dummy implicit none private - public :: smooth_nod, smooth_elem, integrate_nod, extrap_nod + public :: smooth_nod, smooth_elem, integrate_nod, extrap_nod, omp_min_max_sum1, omp_min_max_sum2 real(kind=WP), dimension(:), allocatable :: work_array ! !-------------------------------------------------------------------------------------------- @@ -43,17 +44,23 @@ module g_support ! !-------------------------------------------------------------------------------------------- ! -subroutine smooth_nod2D(arr, N, mesh) +subroutine smooth_nod2D(arr, N, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer, intent(in) :: N real(KIND=WP), dimension(:), intent(inout) :: arr integer :: node, elem, j, q, elnodes(3) real(kind=WP) :: vol -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + allocate(work_array(myDim_nod2D)) DO q=1, N !apply mass matrix N times to smooth the field +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, elem, j, q, elnodes, vol) DO node=1, myDim_nod2D vol=0._WP work_array(node)=0._WP @@ -62,50 +69,60 @@ subroutine smooth_nod2D(arr, N, mesh) elnodes=elem2D_nodes(:,elem) work_array(node)=work_array(node)+sum(arr(elnodes))/3._WP*elem_area(elem) vol=vol+elem_area(elem) - END DO - work_array(node)=work_array(node)/vol + END DO + work_array(node)=work_array(node)/vol END DO +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO DO node=1,myDim_nod2D arr(node)=work_array(node) ENDDO - call exchange_nod(arr) +!$OMP END PARALLEL DO +!$OMP MASTER + call exchange_nod(arr, partit) +!$OMP END MASTER +!$OMP BARRIER END DO deallocate(work_array) end subroutine smooth_nod2D ! 
!-------------------------------------------------------------------------------------------- ! -subroutine smooth_nod3D(arr, N_smooth, mesh) +subroutine smooth_nod3D(arr, N_smooth, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer, intent(in) :: N_smooth real(KIND=WP), intent(inout) :: arr(:,:) integer :: n, el, nz, j, q, num_el, nlev, nl_loc, nu_loc integer :: uln, nln, ule, nle - real(kind=WP) :: vol(mesh%nl,myDim_nod2D) + real(kind=WP) :: vol(mesh%nl, partit%myDim_nod2D) real(kind=WP), allocatable :: work_array(:,:) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + nlev=ubound(arr,1) allocate(work_array(nlev,myDim_nod2D)) ! Precompute area of patches on all levels (at the bottom, some neighbouring ! nodes may vanish in the bathymetry) in the first smoothing step +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, el, nz, j, q, num_el, nlev, nl_loc, nu_loc, uln, nln, ule, nle) +!$OMP DO DO n=1, myDim_nod2D uln = ulevels_nod2d(n) nln = min(nlev,nlevels_nod2d(n)) vol( 1:nln,n) = 0._WP work_array(1:nln,n) = 0._WP - !!PS vol( 1:min(nlev, nlevels_nod2d(n)),n) = 0._WP - !!PS work_array(1:min(nlev, nlevels_nod2d(n)),n) = 0._WP DO j=1, nod_in_elem2D_num(n) el = nod_in_elem2D(j,n) -!!PS nl_loc = min(nlev, minval(nlevels_nod2d(elem2D_nodes(1:3,el)))) -!!PS nu_loc = maxval(ulevels_nod2D(elem2D_nodes(1:3,el))) ule = max( uln, ulevels(el) ) nle = min( nln, min(nlev,nlevels(el)) ) - !!PS DO nz=1, nl_loc DO nz=ule, nle vol(nz,n) = vol(nz,n) + elem_area(el) work_array(nz,n) = work_array(nz,n) + elem_area(el) * (arr(nz, elem2D_nodes(1,el)) & @@ -113,46 +130,34 @@ subroutine smooth_nod3D(arr, N_smooth, mesh) + arr(nz, elem2D_nodes(3,el))) END DO ENDDO - !!PS DO nz=1,nlevels_nod2d(n) DO nz=uln,nln vol(nz,n) = 1._WP / (3._WP * vol(nz,n)) ! 
Here, we need the inverse and scale by 1/3 END DO END DO - +!$OMP END DO ! combined: scale by patch volume + copy back to original field +!$OMP DO DO n=1, myDim_nod2D - !!PS DO nz=1, min(nlev, nlevels_nod2d(n)) uln = ulevels_nod2d(n) nln = min(nlev,nlevels_nod2d(n)) DO nz=uln,nln arr(nz, n) = work_array(nz, n) *vol(nz,n) - if (arr(nz,n)/=arr(nz,n)) then - write(*,*) ' --> found NaN in smoothing' - write(*,*) ' mype = ', mype - write(*,*) ' n = ', n - write(*,*) ' nz,uln,nln = ', nz,uln,nln - write(*,*) ' arr(nz,n) = ', arr(nz,n) - write(*,*) ' work_array(nz,n)= ', work_array(nz,n) - write(*,*) ' vol(nz,n) = ', vol(nz,n) - endif END DO - end DO - - call exchange_nod(arr) - + END DO +!$OMP END DO +!$OMP MASTER + call exchange_nod(arr, partit) +!$OMP END MASTER +!$OMP BARRIER ! And the remaining smoothing sweeps - DO q=1,N_smooth-1 +!$OMP DO DO n=1, myDim_nod2D uln = ulevels_nod2d(n) nln = min(nlev,nlevels_nod2d(n)) - !!PS work_array(1:min(nlev, nlevels_nod2d(n)),n) = 0._WP work_array(1:nln,n) = 0._WP DO j=1,nod_in_elem2D_num(n) el = nod_in_elem2D(j,n) - !!PS nl_loc = min(nlev, minval(nlevels_nod2d(elem2D_nodes(1:3,el)))) - !!PS nu_loc = maxval(ulevels_nod2D(elem2D_nodes(1:3,el))) - !!PS DO nz=1, ule = max( uln, ulevels(el) ) nle = min( nln, min(nlev,nlevels(el)) ) DO nz=ule,nle @@ -162,7 +167,9 @@ subroutine smooth_nod3D(arr, N_smooth, mesh) END DO ENDDO ENDDO +!$OMP END DO ! combined: scale by patch volume + copy back to original field +!$OMP DO DO n=1, myDim_nod2D !!PS DO nz=1, min(nlev, nlevels_nod2d(n)) uln = ulevels_nod2d(n) @@ -170,64 +177,93 @@ subroutine smooth_nod3D(arr, N_smooth, mesh) DO nz=uln,nln arr(nz, n) = work_array(nz, n) *vol(nz,n) END DO - end DO - call exchange_nod(arr) + END DO +!$OMP END DO +!$OMP MASTER + call exchange_nod(arr, partit) +!$OMP END MASTER +!$OMP BARRIER enddo - +!$OMP END PARALLEL deallocate(work_array) end subroutine smooth_nod3D ! !-------------------------------------------------------------------------------------------- ! 
-subroutine smooth_elem2D(arr, N, mesh) +subroutine smooth_elem2D(arr, N, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer, intent(in) :: N real(KIND=WP), dimension(:), intent(inout) :: arr integer :: node, elem, j, q, elnodes(3) real(kind=WP) :: vol -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" allocate(work_array(myDim_nod2D+eDim_nod2D)) DO q=1, N !apply mass matrix N times to smooth the field +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, elem, j, q, elnodes, vol) +!$OMP DO DO node=1, myDim_nod2D vol=0._WP work_array(node)=0._WP DO j=1, nod_in_elem2D_num(node) elem=nod_in_elem2D(j, node) - elnodes=elem2D_nodes(:,elem) work_array(node)=work_array(node)+arr(elem)*elem_area(elem) vol=vol+elem_area(elem) END DO work_array(node)=work_array(node)/vol END DO - call exchange_nod(work_array) +!$OMP END DO +!$OMP MASTER + call exchange_nod(work_array, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:, elem) arr(elem)=sum(work_array(elnodes))/3.0_WP ! Here, we need the inverse and scale by 1/3 ENDDO - call exchange_elem(arr) +!$OMP END DO +!$OMP MASTER + call exchange_elem(arr, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP END PARALLEL END DO deallocate(work_array) end subroutine smooth_elem2D ! !-------------------------------------------------------------------------------------------- ! 
-subroutine smooth_elem3D(arr, N, mesh) +subroutine smooth_elem3D(arr, N, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer, intent(in) :: N real(KIND=WP), dimension(:,:), intent(inout) :: arr integer :: node, elem, my_nl, nz, j, q, elnodes(3) real(kind=WP) :: vol -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" allocate(work_array(myDim_nod2D+eDim_nod2D)) my_nl=ubound(arr,1) DO q=1, N !apply mass matrix N times to smooth the field DO nz=1, my_nl - work_array = 0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, elem, j, q, elnodes, vol) +!$OMP DO + DO node=1, myDim_nod2D+eDim_nod2D + work_array(node) = 0.0_WP + END DO +!$OMP END DO +!$OMP DO DO node=1, myDim_nod2D vol=0._WP if (nz > nlevels_nod2d(node)) CYCLE @@ -243,15 +279,23 @@ subroutine smooth_elem3D(arr, N, mesh) END DO work_array(node)=work_array(node)/vol END DO - call exchange_nod(work_array) +!$OMP END DO +!$OMP MASTER + call exchange_nod(work_array, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO DO elem=1, myDim_elem2D if (nz>nlevels(elem) ) CYCLE if (nz TEST_cavity end do +#if defined(__openmp_reproducible) +!$OMP ORDERED +#endif + lval = lval + lval_row +#if defined(__openmp_reproducible) +!$OMP END ORDERED +#endif end do +!$OMP END PARALLEL DO + int3D=0.0_WP call MPI_AllREDUCE(lval, int3D, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & MPI_COMM_FESOM, MPIerr) @@ -311,10 +385,11 @@ end subroutine integrate_nod_3D ! !-------------------------------------------------------------------------------------------- ! 
-subroutine extrap_nod3D(arr, mesh) +subroutine extrap_nod3D(arr, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - real(KIND=WP), intent(inout) :: arr(:,:) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(KIND=WP), intent(inout) :: arr(:,:) integer :: n, nl1, nz, k, j, el, cnt, jj real(kind=WP), allocatable :: work_array(:) real(kind=WP) :: val @@ -323,10 +398,13 @@ subroutine extrap_nod3D(arr, mesh) real(kind=WP) :: loc_max, glob_max integer :: loc_sum, glob_sum, glob_sum_old -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ allocate(work_array(myDim_nod2D+eDim_nod2D)) - call exchange_nod(arr) + call exchange_nod(arr, partit) !___________________________________________________________________________ loc_max=maxval(arr(1,:)) @@ -391,7 +469,7 @@ subroutine extrap_nod3D(arr, mesh) end do ! --> do nz=1, nl-1 !_______________________________________________________________________ - call exchange_nod(arr) + call exchange_nod(arr, partit) !_______________________________________________________________________ loc_max=maxval(arr(1,:)) @@ -409,7 +487,7 @@ subroutine extrap_nod3D(arr, mesh) if (arr(nz,n)>0.99_WP*dummy) arr(nz,n)=arr(nz-1,n) end do end do - call exchange_nod(arr) + call exchange_nod(arr, partit) !___________________________________________________________________________ deallocate(work_array) @@ -417,7 +495,138 @@ subroutine extrap_nod3D(arr, mesh) end subroutine extrap_nod3D ! !-------------------------------------------------------------------------------------------- +! 
returns min/max/sum of a one dimentional array (same as minval) but with the support of OpenMP +FUNCTION omp_min_max_sum1(arr, pos1, pos2, what, partit, nan) + USE MOD_PARTIT + implicit none + real(kind=WP), intent(in) :: arr(:) + integer, intent(in) :: pos1, pos2 + character(3), intent(in) :: what + real(kind=WP), optional :: nan !to be implemented upon the need (for masked arrays) + real(kind=WP) :: omp_min_max_sum1 + real(kind=WP) :: val + integer :: n + + type(t_partit),intent(in), & + target :: partit + + SELECT CASE (trim(what)) + CASE ('sum') + val=0.0_WP +#if !defined(__openmp_reproducible) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) +!$OMP DO REDUCTION(+: val) +#endif + do n=pos1, pos2 + val=val+arr(n) + end do +#if !defined(__openmp_reproducible) +!$OMP END DO +!$OMP END PARALLEL +#endif + + CASE ('min') + val=arr(1) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) +!$OMP DO REDUCTION(min: val) + do n=pos1, pos2 + val=min(val, arr(n)) + end do +!$OMP END DO +!$OMP END PARALLEL + + CASE ('max') + val=arr(1) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) +!$OMP DO REDUCTION(max: val) + do n=pos1, pos2 + val=max(val, arr(n)) + end do +!$OMP END DO +!$OMP END PARALLEL + + CASE DEFAULT + if (partit%mype==0) write(*,*) trim(what), ' is not implemented in omp_min_max_sum case!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) + STOP + END SELECT + + omp_min_max_sum1=val +END FUNCTION ! +!-------------------------------------------------------------------------------------------- +! 
returns min/max/sum of a two dimentional array (same as minval) but with the support of OpenMP +FUNCTION omp_min_max_sum2(arr, pos11, pos12, pos21, pos22, what, partit, nan) + implicit none + real(kind=WP), intent(in) :: arr(:,:) + integer, intent(in) :: pos11, pos12, pos21, pos22 + character(3), intent(in) :: what + real(kind=WP), optional :: nan !to be implemented upon the need (for masked arrays) + real(kind=WP) :: omp_min_max_sum2 + real(kind=WP) :: val, vmasked, val_part(pos11:pos12) + integer :: i, j + + + type(t_partit),intent(in), & + target :: partit + + IF (PRESENT(nan)) vmasked=nan + + SELECT CASE (trim(what)) + CASE ('min') + if (.not. present(nan)) vmasked=huge(vmasked) !just some crazy number + val=arr(1,1) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j) +!$OMP DO REDUCTION(min: val) + do j=pos21, pos22 + do i=pos11, pos12 + if (arr(i,j)/=vmasked) val=min(val, arr(i,j)) + end do + end do +!$OMP END DO +!$OMP END PARALLEL + + CASE ('max') + if (.not. present(nan)) vmasked=tiny(vmasked) !just some crazy number + val=arr(1,1) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j) +!$OMP DO REDUCTION(max: val) + do j=pos21, pos22 + do i=pos11, pos12 + if (arr(i,j)/=vmasked) val=max(val, arr(i,j)) + end do + end do +!$OMP END DO +!$OMP END PARALLEL + + CASE ('sum') + if (.not. present(nan)) vmasked=huge(vmasked) !just some crazy number + val=0. +#if !defined(__openmp_reproducible) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(i, j) REDUCTION(+: val) + do j=pos21, pos22 + do i=pos11, pos12 + if (arr(i,j)/=vmasked) val=val+arr(i,j) + end do + end do +!$OMP END PARALLEL DO +#else +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(j) + do j=pos21, pos22 + val_part(j) = sum(arr(pos11:pos12,j), mask=(arr(pos11:pos12,j)/=vmasked)) + end do +!$OMP END PARALLEL DO + val = sum(val_part(pos21:pos22)) +#endif + + CASE DEFAULT + if (partit%mype==0) write(*,*) trim(what), ' is not implemented in omp_min_max_sum case!' 
+ call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) + STOP + END SELECT + +omp_min_max_sum2=val +END FUNCTION end module g_support diff --git a/src/gen_surface_forcing.F90 b/src/gen_surface_forcing.F90 index 151e2c8ca..43a4f1fa3 100644 --- a/src/gen_surface_forcing.F90 +++ b/src/gen_surface_forcing.F90 @@ -33,16 +33,17 @@ MODULE g_sbf !! sbc_ini -- inizialization atmpospheric forcing !! sbc_do -- provide a sbc (surface boundary conditions) each time step !! - USE o_ARRAYS USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_comm_auto USE g_support USE g_rotate_grid USE g_config, only: dummy, ClimateDataPath, dt USE g_clock, only: timeold, timenew, dayold, daynew, yearold, yearnew, cyearnew - USE g_forcing_arrays, only: runoff + USE g_forcing_arrays, only: runoff, chl USE g_read_other_NetCDF, only: read_other_NetCDF, read_2ddata_on_grid_netcdf IMPLICIT NONE @@ -81,12 +82,17 @@ MODULE g_sbf logical :: l_cloud = .false. logical :: l_snow = .false. - character(10), save :: runoff_data_source='CORE2' + character(10), save :: runoff_data_source='CORE2' character(len=MAX_PATH), save :: nm_runoff_file ='runoff.nc' - character(10), save :: sss_data_source ='CORE2' + character(10), save :: sss_data_source ='CORE2' character(len=MAX_PATH), save :: nm_sss_data_file ='PHC2_salx.nc' + character(10), save :: chl_data_source ='None' ! 'Sweeney' Chlorophyll climatology Sweeney et al. 2005 + character(len=MAX_PATH), save :: nm_chl_data_file ='/work/ollie/dsidoren/input/forcing/Sweeney_2005.nc' + real(wp), save :: chl_const = 0.1 + + logical :: runoff_climatology =.false. real(wp), allocatable, save, dimension(:), public :: qns ! downward non solar heat over the ocean [W/m2] @@ -122,7 +128,7 @@ MODULE g_sbf character(len=256), save :: nm_prec_file = 'prec.dat' ! name of file with total precipitation, if netcdf file then provide only name from "nameyyyy.nc" yyyy.nc will be added by model character(len=256), save :: nm_snow_file = 'snow.dat' ! 
name of file with snow precipitation, if netcdf file then provide only name from "nameyyyy.nc" yyyy.nc will be added by model character(len=256), save :: nm_mslp_file = 'mslp.dat' ! name of file with mean sea level pressure, if netcdf file then provide only name from "nameyyyy.nc" yyyy.nc will be added by model - character(len=256), save :: nm_cloud_file = 'cloud.dat' ! name of file with clouds, if netcdf file then provide only name from "nameyyyy.nc" yyyy.nc will be added by model + character(len=256), save :: nm_cloud_file = 'cloud.dat' ! name of file with clouds, if netcdf file then provide only name from "nameyyyy.nc" yyyy.nc will be added by model character(len=34), save :: nm_xwind_var = 'uwnd' ! name of variable in file with wind character(len=34), save :: nm_ywind_var = 'vwnd' ! name of variable in file with wind @@ -178,37 +184,37 @@ MODULE g_sbf CONTAINS - SUBROUTINE nc_readTimeGrid(flf) + SUBROUTINE nc_readTimeGrid(flf, partit) ! Read time array and grid from nc file - IMPLICIT NONE - - type(flfi_type),intent(inout) :: flf - integer :: iost !I/O status - integer :: ncid ! netcdf file id - integer :: i + IMPLICIT NONE + type(flfi_type),intent(inout) :: flf + type(t_partit), intent(inout), target :: partit + integer :: iost !I/O status + integer :: ncid ! netcdf file id + integer :: i ! ID dimensions and variables: - integer :: id_lon - integer :: id_lat - integer :: id_lond - integer :: id_latd - integer :: id_time - integer :: id_timed - integer :: nf_start(4) - integer :: nf_edges(4) - integer :: ierror ! return error code - character(len=20) :: aux_calendar - integer :: aux_len + integer :: id_lon + integer :: id_lat + integer :: id_lond + integer :: id_latd + integer :: id_time + integer :: id_timed + integer :: nf_start(4) + integer :: nf_edges(4) + integer :: ierror ! 
return error code + character(len=20) :: aux_calendar + integer :: aux_len !open file - if (mype==0) then + if (partit%mype==0) then iost = nf_open(trim(flf%file_name),NF_NOWRITE,ncid) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) ! get dimensions - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "LAT", id_latd) if (iost .ne. NF_NOERR) then iost = nf_inq_dimid(ncid, "lat", id_latd) @@ -220,10 +226,10 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_dimid(ncid, "LAT1", id_latd) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "LON", id_lond) if (iost .ne. NF_NOERR) then iost = nf_inq_dimid(ncid, "lon", id_lond) @@ -235,10 +241,10 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_dimid(ncid, "LON1", id_lond) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimid(ncid, "TIME", id_timed) if (iost .ne. NF_NOERR) then iost = nf_inq_dimid(ncid, "time", id_timed) @@ -247,11 +253,11 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_dimid(ncid, "TIME1", id_timed) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) ! 
get variable id - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_varid(ncid, "LAT", id_lat) if (iost .ne. NF_NOERR) then iost = nf_inq_varid(ncid, "lat", id_lat) @@ -263,9 +269,9 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_varid(ncid, "LAT1", id_lat) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) + if (partit%mype==0) then iost = nf_inq_varid(ncid, "LON", id_lon) if (iost .ne. NF_NOERR) then iost = nf_inq_varid(ncid, "longitude", id_lon) @@ -277,10 +283,10 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_varid(ncid, "LON1", id_lon) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_varid(ncid, "TIME", id_time) if (iost .ne. NF_NOERR) then iost = nf_inq_varid(ncid, "time", id_time) @@ -289,28 +295,28 @@ SUBROUTINE nc_readTimeGrid(flf) iost = nf_inq_varid(ncid, "TIME1",id_time) end if end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) ! 
get dimensions size - if (mype==0) then + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_latd, flf%nc_Nlat) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_lond, flf%nc_Nlon) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) - if (mype==0) then + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) + if (partit%mype==0) then iost = nf_inq_dimlen(ncid, id_timed,flf%nc_Ntime) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) flf%nc_Nlon=flf%nc_Nlon+2 !for the halo in case of periodic boundary - call MPI_BCast(flf%nc_Nlon, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(flf%nc_Nlat, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(flf%nc_Ntime, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_Nlon, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_Nlat, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_Ntime, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) if (.not. allocated(flf%nc_time)) then allocate( flf%nc_lon(flf%nc_Nlon), flf%nc_lat(flf%nc_Nlat),& @@ -323,38 +329,38 @@ SUBROUTINE nc_readTimeGrid(flf) !____________________________________________________________________________ !read variables from file ! 
read lat - if (mype==0) then + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=flf%nc_Nlat iost = nf_get_vara_double(ncid, id_lat, nf_start, nf_edges, flf%nc_lat) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) ! read lon - if (mype==0) then + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=flf%nc_Nlon-2 iost = nf_get_vara_double(ncid, id_lon, nf_start, nf_edges, flf%nc_lon(2:flf%nc_Nlon-1)) flf%nc_lon(1) =flf%nc_lon(flf%nc_Nlon-1) flf%nc_lon(flf%nc_Nlon) =flf%nc_lon(2) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) !____________________________________________________________________________ ! read time axis from file - if (mype==0) then + if (partit%mype==0) then nf_start(1)=1 nf_edges(1)=flf%nc_Ntime iost = nf_get_vara_double(ncid, id_time, nf_start, nf_edges, flf%nc_time) ! digg for calendar attribute in time axis variable end if - call MPI_BCast(flf%nc_time, flf%nc_Ntime, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(flf%nc_time, flf%nc_Ntime, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) ! digg for calendar attribute in time axis variable - if (mype==0) then + if (partit%mype==0 .and. 
use_flpyrcheck) then iost = nf_inq_attlen(ncid, id_time,'calendar',aux_len) iost = nf_get_att(ncid, id_time,'calendar',aux_calendar) aux_calendar = aux_calendar(1:aux_len) @@ -385,7 +391,7 @@ SUBROUTINE nc_readTimeGrid(flf) write(*,*) ' message block in gen_surface_forcing.F90.' write(*,*) '____________________________________________________________' print *, achar(27)//'[0m' - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if elseif ((trim(flf%calendar).eq.'julian') .or. & (trim(flf%calendar).eq.'gregorian') .or. & @@ -406,7 +412,7 @@ SUBROUTINE nc_readTimeGrid(flf) write(*,*) ' gen_surface_forcing.F90' write(*,*) '____________________________________________________________' print *, achar(27)//'[0m' - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if else print *, achar(27)//'[31m' @@ -425,7 +431,7 @@ SUBROUTINE nc_readTimeGrid(flf) write(*,*) ' example with ncdump -h forcing_file.nc ' write(*,*) '____________________________________________________________' print *, achar(27)//'[0m' - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if end if @@ -439,8 +445,8 @@ SUBROUTINE nc_readTimeGrid(flf) flf%nc_time(flf%nc_Ntime) = flf%nc_time(flf%nc_Ntime) + (flf%nc_time(flf%nc_Ntime) - flf%nc_time(flf%nc_Ntime-1))/2.0 end if end if - call MPI_BCast(flf%nc_lon, flf%nc_Nlon, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(flf%nc_lat, flf%nc_Nlat, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_lon, flf%nc_Nlon, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(flf%nc_lat, flf%nc_Nlat, MPI_DOUBLE_PRECISION, 0, partit%MPI_COMM_FESOM, ierror) !___________________________________________________________________________ !flip lat and data in case of lat from -90 to 90 @@ -450,15 +456,15 @@ SUBROUTINE nc_readTimeGrid(flf) if ( flf%nc_lat(1) > flf%nc_lat(flf%nc_Nlat) ) then flip_lat = 1 flf%nc_lat=flf%nc_lat(flf%nc_Nlat:1:-1) - if (mype==0) 
write(*,*) "fv_sbc: nc_readTimeGrid: FLIP lat and data while lat from -90 to 90" + if (partit%mype==0) write(*,*) "fv_sbc: nc_readTimeGrid: FLIP lat and data while lat from -90 to 90" endif endif - if (mype==0) then + if (partit%mype==0) then iost = nf_close(ncid) end if - call MPI_BCast(iost, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call check_nferr(iost,flf%file_name) + call MPI_BCast(iost, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call check_nferr(iost,flf%file_name,partit) if (ic_cyclic) then flf%nc_lon(1) =flf%nc_lon(1)-360._WP @@ -510,7 +516,7 @@ SUBROUTINE nc_sbc_ini_fillnames(yyyy) if (l_cloud) sbc_flfi(i_cloud)%var_name=ADJUSTL(trim(nm_cloud_var)) END SUBROUTINE nc_sbc_ini_fillnames - SUBROUTINE nc_sbc_ini(rdate, mesh) + SUBROUTINE nc_sbc_ini(rdate, partit, mesh) !!--------------------------------------------------------------------- !! ** Purpose : initialization of ocean forcing from NETCDF file !!---------------------------------------------------------------------- @@ -528,17 +534,19 @@ SUBROUTINE nc_sbc_ini(rdate, mesh) real(wp) :: x, y ! coordinates of elements integer :: fld_idx type(flfi_type), pointer :: flf - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! used for interpolate on elements ! ALLOCATE( bilin_indx_i(elem2D),bilin_indx_j(elem2D), & ! & qns(elem2D), emp(elem2D), qsr(elem2D), & ! & STAT=sbc_alloc ) ! used to inerpolate on nodes - warn = 0 - + warn = 0 ! get ini year; Fill names of sbc_flfi idate=int(rdate) @@ -546,12 +554,14 @@ SUBROUTINE nc_sbc_ini(rdate, mesh) call nc_sbc_ini_fillnames(yyyy) ! 
we assume that all NetCDF files have identical grid and time variable do fld_idx = 1, i_totfl - call nc_readTimeGrid(sbc_flfi(fld_idx)) + call nc_readTimeGrid(sbc_flfi(fld_idx), partit) end do if (lfirst) then do fld_idx = 1, i_totfl flf=>sbc_flfi(fld_idx) ! prepare nearest coordinates in INfile , save to bilin_indx_i/j +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, x, y) +!$OMP DO REDUCTION(max: warn) do i = 1, myDim_nod2D+eDim_nod2D x = geo_coord_nod2D(1,i)/rad if (x < 0) x=x+360._WP @@ -584,18 +594,20 @@ SUBROUTINE nc_sbc_ini(rdate, mesh) end if end if end do +!$OMP END DO +!$OMP END PARALLEL end do lfirst=.false. end if do fld_idx = 1, i_totfl ! get first coefficients for time interpolation on model grid for all data - call getcoeffld(fld_idx, rdate, mesh) + call getcoeffld(fld_idx, rdate, partit, mesh) end do ! interpolate in time - call data_timeinterp(rdate) + call data_timeinterp(rdate, partit) END SUBROUTINE nc_sbc_ini - SUBROUTINE getcoeffld(fld_idx, rdate, mesh) + SUBROUTINE getcoeffld(fld_idx, rdate, partit, mesh) use forcing_provider_async_module use io_netcdf_workaround_module !!--------------------------------------------------------------------- @@ -606,6 +618,8 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) !! ** Action : !!---------------------------------------------------------------------- IMPLICIT NONE + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer, intent(in) :: fld_idx real(wp),intent(in) :: rdate ! 
initialization date integer :: iost !I/O status @@ -634,12 +648,14 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) character(len=MAX_PATH), pointer :: file_name character(len=34) , pointer :: var_name real(wp), pointer :: nc_time(:), nc_lon(:), nc_lat(:) - type(t_mesh), intent(in) , target :: mesh real(4), dimension(:,:), pointer :: sbcdata1, sbcdata2 logical sbcdata1_from_cache, sbcdata2_from_cache integer rootrank -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! fld_idx determines which ouf our forcing fields we use here nc_Ntime =>sbc_flfi(fld_idx)%nc_Ntime @@ -658,7 +674,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) sbc_flfi(fld_idx)%sbcdata_a_t_index = -1 allocate(sbc_flfi(fld_idx)%sbcdata_b(nc_Nlon,nc_Nlat)) sbc_flfi(fld_idx)%sbcdata_b_t_index = -1 - sbc_flfi(fld_idx)%read_forcing_rootrank = next_io_rank(MPI_COMM_FESOM, sbc_flfi(fld_idx)%async_netcdf_allowed) + sbc_flfi(fld_idx)%read_forcing_rootrank = next_io_rank(MPI_COMM_FESOM, sbc_flfi(fld_idx)%async_netcdf_allowed, partit) end if rootrank = sbc_flfi(fld_idx)%read_forcing_rootrank @@ -676,7 +692,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) delta_t = 1.0_wp if (mype==0) then write(*,*) 'WARNING: no temporal extrapolation into future (nearest neighbour is used): ', trim(var_name), ' !' - write(*,*) file_name + write(*,*) trim(file_name) write(*,*) nc_time(1), nc_time(nc_Ntime), now_date end if elseif (t_indx < 1) then ! NO extrapolation back in time @@ -685,7 +701,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) delta_t = 1.0_wp if (mype==0) then write(*,*) 'WARNING: no temporal extrapolation back in time (nearest neighbour is used): ', trim(var_name), ' !' - write(*,*) file_name + write(*,*) trim(file_name) write(*,*) nc_time(1), nc_time(nc_Ntime), now_date end if end if @@ -785,8 +801,7 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) ! end if ! 
bilinear space interpolation, and time interpolation , ! data is assumed to be sampled on a regular grid -!!$OMP PARALLEL -!!$OMP DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(ii, i, j, ip1, jp1, x, y, extrp, x1, x2, y1, y2, denom, data1, data2) do ii = 1, myDim_nod2D+eDim_nod2D i = bilin_indx_i(fld_idx, ii) j = bilin_indx_j(fld_idx, ii) @@ -843,12 +858,11 @@ SUBROUTINE getcoeffld(fld_idx, rdate, mesh) coef_a(fld_idx, ii) = ( data2 - data1 ) / delta_t !( nc_time(t_indx+1) - nc_time(t_indx) ) coef_b(fld_idx, ii) = data1 - coef_a(fld_idx, ii) * nc_time(t_indx) - end do !ii -!!$OMP END DO -!!$OMP END PARALLEL + end do +!$OMP END PARALLEL DO END SUBROUTINE getcoeffld - SUBROUTINE data_timeinterp(rdate) + SUBROUTINE data_timeinterp(rdate, partit) !!--------------------------------------------------------------------- !! *** ROUTINE data_timeinterp *** !! @@ -857,24 +871,23 @@ SUBROUTINE data_timeinterp(rdate) !! ** Action : !!---------------------------------------------------------------------- IMPLICIT NONE - real(wp),intent(in) :: rdate ! seconds + type(t_partit), intent(inout), target :: partit + real(wp), intent(in) :: rdate ! seconds ! assign data from interpolation to taux and tauy integer :: fld_idx, i,j,ii -!!$OMP PARALLEL -!!$OMP DO do fld_idx = 1, i_totfl - do i = 1, myDim_nod2D+eDim_nod2D +!$OMP PARALLEL DO + do i = 1, partit%myDim_nod2D+partit%eDim_nod2D ! store processed forcing data for fesom computation atmdata(fld_idx,i) = rdate * coef_a(fld_idx,i) + coef_b(fld_idx,i) end do !nod2D - end do !fld_idx -!!$OMP END DO -!!$OMP END PARALLEL +!$OMP END PARALLEL DO + end do END SUBROUTINE data_timeinterp - SUBROUTINE sbc_ini(mesh) + SUBROUTINE sbc_ini(partit, mesh) !!--------------------------------------------------------------------- !! *** ROUTINE sbc_ini *** !! 
@@ -890,7 +903,8 @@ SUBROUTINE sbc_ini(mesh) integer :: sbc_alloc !: allocation status real(wp) :: tx, ty - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit namelist /nam_sbc/ nm_xwind_file, nm_ywind_file, nm_humi_file, nm_qsr_file, & nm_qlw_file, nm_tair_file, nm_prec_file, nm_snow_file, & @@ -898,14 +912,21 @@ SUBROUTINE sbc_ini(mesh) nm_qsr_var, nm_qlw_var, nm_tair_var, nm_prec_var, nm_snow_var, & nm_mslp_var, nm_cloud_var, nm_cloud_file, nm_nc_iyear, nm_nc_imm, nm_nc_idd, nm_nc_freq, nm_nc_tmid, y_perpetual, & l_xwind, l_ywind, l_humi, l_qsr, l_qlw, l_tair, l_prec, l_mslp, l_cloud, l_snow, & - nm_runoff_file, runoff_data_source, runoff_climatology, nm_sss_data_file, sss_data_source + nm_runoff_file, runoff_data_source, runoff_climatology, nm_sss_data_file, sss_data_source, & + chl_data_source, nm_chl_data_file, chl_const + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ! OPEN and read namelist for SBC open( unit=nm_sbc_unit, file='namelist.forcing', form='formatted', access='sequential', status='old', iostat=iost ) if (iost == 0) then if (mype==0) WRITE(*,*) ' file : ', 'namelist_bc.nml',' open ok' else if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist_bc.nml',' ; iostat=',iost - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif READ( nm_sbc_unit, nml=nam_sbc, iostat=iost ) @@ -921,6 +942,8 @@ SUBROUTINE sbc_ini(mesh) write(*,*) "Surface boundary conditions parameters:" end if +#if !defined __ifsinterface + i_totfl=0 if (l_xwind) then if (mype==0) then @@ -1023,21 +1046,32 @@ SUBROUTINE sbc_ini(mesh) emp = 0.0_WP qsr = 0.0_WP ALLOCATE(sbc_flfi(i_totfl)) - call nc_sbc_ini(rdate, mesh) + call nc_sbc_ini(rdate, partit, mesh) !========================================================================== +#endif ! runoff if (runoff_data_source=='CORE1' .or. 
runoff_data_source=='CORE2' ) then ! runoff in CORE is constant in time ! Warning: For a global mesh, conservative scheme is to be updated!! - call read_other_NetCDF(nm_runoff_file, 'Foxx_o_roff', 1, runoff, .false., mesh) + call read_other_NetCDF(nm_runoff_file, 'Foxx_o_roff', 1, runoff, .false., partit, mesh) runoff=runoff/1000.0_WP ! Kg/s/m2 --> m/s end if + if (use_sw_pene) then + if (chl_data_source == 'Sweeney') then + if (mype==0) write(*,*) trim(chl_data_source) //' chlorophyll climatology will be used' + if (mype==0) write(*,*) 'nm_chl_data_file=', trim(nm_chl_data_file) + else + if (mype==0) write(*,*) 'using constant chlorophyll concentration: ', chl_const + chl=chl_const + end if + end if + if (mype==0) write(*,*) "DONE: Ocean forcing inizialization." if (mype==0) write(*,*) 'Parts of forcing data (only constant in time fields) are read' END SUBROUTINE sbc_ini - SUBROUTINE sbc_do(mesh) + SUBROUTINE sbc_do(partit, mesh) !!--------------------------------------------------------------------- !! *** ROUTINE sbc_do *** !! @@ -1054,10 +1088,14 @@ SUBROUTINE sbc_do(mesh) integer :: yyyy, dd, mm integer, pointer :: nc_Ntime, t_indx, t_indx_p1 real(wp), pointer :: nc_time(:) - character(len=MAX_PATH) :: filename - type(t_mesh), intent(in) , target :: mesh + character(len=MAX_PATH) :: filename + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" force_newcoeff=.false. if (yearnew/=yearold) then @@ -1067,7 +1105,7 @@ SUBROUTINE sbc_do(mesh) call nc_sbc_ini_fillnames(yyyy) ! we assume that all NetCDF files have identical grid and time variable do fld_idx = 1, i_totfl - call nc_readTimeGrid(sbc_flfi(fld_idx)) + call nc_readTimeGrid(sbc_flfi(fld_idx), partit) end do force_newcoeff=.true. 
end if @@ -1084,31 +1122,47 @@ SUBROUTINE sbc_do(mesh) nc_Ntime =>sbc_flfi(fld_idx)%nc_Ntime if ( ((rdate > nc_time(t_indx_p1)) .and. (nc_time(t_indx) < nc_time(nc_Ntime))) .or. force_newcoeff) then ! get new coefficients for time interpolation on model grid for all data - call getcoeffld(fld_idx, rdate, mesh) + call getcoeffld(fld_idx, rdate, partit, mesh) if (fld_idx==i_xwind) do_rotation=.true. endif end do if (do_rotation) then +!$OMP PARALLEL DO do i=1, myDim_nod2D+eDim_nod2D call vector_g2r(coef_a(i_xwind,i), coef_a(i_ywind,i), coord_nod2D(1,i), coord_nod2D(2,i), 0) call vector_g2r(coef_b(i_xwind,i), coef_b(i_ywind,i), coord_nod2D(1,i), coord_nod2D(2,i), 0) end do +!$OMP END PARALLEL DO end if !========================================================================== ! prepare a flag which checks whether to update monthly data (SSS, river runoff) - update_monthly_flag=((day_in_month==num_day_in_month(fleapyear,month) .and. timenew==86400._WP)) + update_monthly_flag=( (day_in_month==num_day_in_month(fleapyear,month) .AND. timenew==86400._WP) .OR. mstep==1 ) ! read in SSS for applying SSS restoring if (surf_relax_S > 0._WP) then if (sss_data_source=='CORE1' .or. sss_data_source=='CORE2') then if (update_monthly_flag) then - i=month+1 + i=month + if (mstep > 1) i=i+1 if (i > 12) i=1 if (mype==0) write(*,*) 'Updating SSS restoring data for month ', i - call read_other_NetCDF(nm_sss_data_file, 'SALT', i, Ssurf, .true., mesh) + call read_other_NetCDF(nm_sss_data_file, 'SALT', i, Ssurf, .true., partit, mesh) + end if + end if + end if + + ! 
read in CHL for applying shortwave penetration + if (use_sw_pene) then + if (chl_data_source=='Sweeney') then + if (update_monthly_flag) then + i=month + if (mstep > 1) i=i+1 + if (i > 12) i=1 + if (mype==0) write(*,*) 'Updating chlorophyll climatology for month ', i + call read_other_NetCDF(nm_chl_data_file, 'chl', i, chl, .true., partit, mesh) end if end if end if @@ -1119,23 +1173,24 @@ SUBROUTINE sbc_do(mesh) if(update_monthly_flag) then if(runoff_climatology) then !climatology monthly mean - i=month+1 + i=month + if (mstep > 1) i=i+1 if (i > 12) i=1 if (mype==0) write(*,*) 'Updating monthly climatology runoff for month ', i filename=trim(nm_runoff_file) - call read_2ddata_on_grid_NetCDF(filename,'runoff', i, runoff, mesh) + call read_2ddata_on_grid_NetCDF(filename,'runoff', i, runoff, partit, mesh) !kg/m2/s -> m/s runoff=runoff/1000.0_WP else !monthly data - - i=month+1 + i=month + if (mstep > 1) i=i+1 if (i > 12) i=1 if (mype==0) write(*,*) 'Updating monthly runoff for month ', i filename=trim(nm_runoff_file)//cyearnew//'.nc' - call read_2ddata_on_grid_NetCDF(filename,'runoff', i, runoff, mesh) + call read_2ddata_on_grid_NetCDF(filename,'runoff', i, runoff, partit, mesh) !kg/m2/s -> m/s runoff=runoff/1000.0_WP @@ -1147,27 +1202,10 @@ SUBROUTINE sbc_do(mesh) ! interpolate in time - call data_timeinterp(rdate) + call data_timeinterp(rdate, partit) END SUBROUTINE sbc_do - SUBROUTINE err_call(iost,fname) - !!--------------------------------------------------------------------- - !! *** ROUTINE err_call *** - !! - !! ** Purpose : call Error - !! ** Method : - !! 
** Action : - !!---------------------------------------------------------------------- - IMPLICIT NONE - integer, intent(in) :: iost - character(len=MAX_PATH), intent(in) :: fname - write(*,*) 'ERROR: I/O status=',iost,' file= ',fname - STOP 'ERROR: stop' - - - END SUBROUTINE err_call - FUNCTION julday(yyyy,mm,dd) IMPLICIT NONE @@ -1257,14 +1295,15 @@ SUBROUTINE sbc_end & qns, emp, qsr) END SUBROUTINE sbc_end - SUBROUTINE check_nferr(iost,fname) + SUBROUTINE check_nferr(iost,fname, partit) IMPLICIT NONE - character(len=MAX_PATH), intent(in) :: fname - integer, intent(in) :: iost + type(t_partit), intent(inout), target :: partit + character(len=MAX_PATH), intent(in) :: fname + integer, intent(in) :: iost if (iost .ne. NF_NOERR) then write(*,*) 'ERROR: I/O status= "',trim(nf_strerror(iost)),'";',iost,' file= ',fname - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif END SUBROUTINE check_nferr diff --git a/src/ice_EVP.F90 b/src/ice_EVP.F90 index cd994f5db..36d24650c 100755 --- a/src/ice_EVP.F90 +++ b/src/ice_EVP.F90 @@ -1,646 +1,602 @@ module ice_EVP_interfaces - interface - subroutine stress_tensor(ice_strength, mesh) - use g_parsup - use mod_mesh - real(kind=WP), intent(in) :: ice_strength(mydim_elem2D) - type(t_mesh), intent(in), target :: mesh - end subroutine + interface + subroutine stress_tensor(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine stress2rhs(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module - subroutine stress2rhs(inv_areamass, ice_strength, mesh) - USE MOD_MESH - USE g_PARSUP - REAL(kind=WP), intent(in) :: 
inv_areamass(myDim_nod2D), ice_strength(mydim_elem2D) - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface +module ice_EVPdynamics_interface + interface + subroutine EVPdynamics(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module ! ! Contains routines of EVP dynamics ! -!=================================================================== -subroutine stress_tensor(ice_strength, mesh) +!_______________________________________________________________________________ ! EVP rheology. The routine computes stress tensor components based on ice ! velocity field. They are stored as elemental arrays (sigma11, sigma22 and ! sigma12). The ocean velocity is at nodal locations. -use o_param -use i_param -use mod_mesh -use i_arrays -use g_parsup -USE g_CONFIG - +subroutine stress_tensor(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use g_CONFIG #if defined (__icepack) -use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif - -implicit none - -real(kind=WP), intent(in) :: ice_strength(mydim_elem2D) -real(kind=WP) :: eta, xi, delta, aa -integer :: el, elnodes(3) -real(kind=WP) :: asum, msum, vale, dx(3), dy(3) -real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte -real(kind=WP) :: zeta, delta_inv, d1, d2 - -type(t_mesh), intent(in), target :: mesh - -#include "associate_mesh.h" - - vale = 1.0_WP/(ellipse**2) - - dte = ice_dt/(1.0_WP*evp_rheol_steps) - det1 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) - det2 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) !*ellipse**2 - - - do el=1,myDim_elem2D - !__________________________________________________________________________ - ! 
if element contains cavity node skip it - !!PS if ( any(ulevels_nod2d(elem2D_nodes(:,el)) > 1) ) cycle - if (ulevels(el) > 1) cycle - - ! ===== Check if there is ice on elem - - ! There is no ice in elem - ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE - if (ice_strength(el) > 0.) then - ! ===== - ! ===== Deformation rate tensor on element elem: - !du/dx - - eps11(el) = sum(gradient_sca(1:3,el)*U_ice(elem2D_nodes(1:3,el))) & - - metric_factor(el) * sum(V_ice(elem2D_nodes(1:3,el)))/3.0_WP - - eps22(el) = sum(gradient_sca(4:6, el)*V_ice(elem2D_nodes(1:3,el))) - - eps12(el) = 0.5_WP*(sum(gradient_sca(4:6,el)*U_ice(elem2D_nodes(1:3,el))) & - + sum(gradient_sca(1:3,el)*V_ice(elem2D_nodes(1:3,el))) & - + metric_factor(el) * sum(U_ice(elem2D_nodes(1:3,el)))/3.0_WP) - ! ===== moduli: - delta = sqrt((eps11(el)*eps11(el) + eps22(el)*eps22(el))*(1.0_WP+vale) + 4.0_WP*vale*eps12(el)*eps12(el) + & - 2.0_WP*eps11(el)*eps22(el)*(1.0_WP-vale)) - - ! ======================================= - ! ===== Here the EVP rheology piece starts - ! ======================================= - - ! ===== viscosity zeta should exceed zeta_min - ! (done via limiting delta from above) - - !if(delta>pressure/zeta_min) delta=pressure/zeta_min - !It does not work properly by - !creating response where ice_strength is small - ! Uncomment and test if necessary - - ! ===== if delta is too small or zero, viscosity will too large (unlimited) - ! (limit delta_inv) - delta_inv = 1.0_WP/max(delta,delta_min) - zeta = ice_strength(el)*delta_inv - ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta - ! is too large in some regions and CFL criterion is violated. - ! The regularization below was introduced by Hunke, - ! but seemingly is not used in the current CICE. - ! Without it divergence and zeta can be noisy (but code - ! remains stable), using it reduces viscosities too strongly. - ! 
It is therefore commented - - !if (zeta>Clim_evp*voltriangle(el)) then - !zeta=Clim_evp*voltriangle(el) - !end if - - zeta = zeta*Tevp_inv - - r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*Tevp_inv - r2 = zeta*(eps11(el)-eps22(el))*vale - r3 = zeta*eps12(el)*vale - - si1 = det1*(sigma11(el) + sigma22(el) + dte*r1) - si2 = det2*(sigma11(el) - sigma22(el) + dte*r2) + implicit none + type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: el + real(kind=WP) :: det1, det2, dte, vale, r1, r2, r3, si1, si2 + real(kind=WP) :: zeta, delta, delta_inv, d1, d2 + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: ice_strength +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + ice_strength=> ice%work%ice_strength(:) + !___________________________________________________________________________ + vale = 1.0_WP/(ice%ellipse**2) + dte = ice%ice_dt/(1.0_WP*ice%evp_rheol_steps) + det1 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) + det2 = 1.0_WP/(1.0_WP + 0.5_WP*ice%Tevp_inv*dte) !*ellipse**2 + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(el, r1, r2, r3, si1, si2, zeta, delta, delta_inv, d1, d2) + do el=1,myDim_elem2D + !_______________________________________________________________________ + ! 
if element contains cavity node skip it + if (ulevels(el) > 1) cycle - sigma12(el) = det2*(sigma12(el)+dte*r3) - sigma11(el) = 0.5_WP*(si1+si2) - sigma22(el) = 0.5_WP*(si1-si2) - + ! ===== Check if there is ice on elem + ! There is no ice in elem + ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE + if (ice_strength(el) > 0.) then + ! ===== + ! ===== Deformation rate tensor on element elem: + !du/dx + eps11(el) = sum(gradient_sca(1:3,el)*U_ice(elem2D_nodes(1:3,el))) & + - metric_factor(el) * sum(V_ice(elem2D_nodes(1:3,el)))/3.0_WP + + eps22(el) = sum(gradient_sca(4:6, el)*V_ice(elem2D_nodes(1:3,el))) + + eps12(el) = 0.5_WP*(sum(gradient_sca(4:6,el)*U_ice(elem2D_nodes(1:3,el))) & + + sum(gradient_sca(1:3,el)*V_ice(elem2D_nodes(1:3,el))) & + + metric_factor(el) * sum(U_ice(elem2D_nodes(1:3,el)))/3.0_WP) + ! ===== moduli: + delta = sqrt((eps11(el)*eps11(el) + eps22(el)*eps22(el))*(1.0_WP+vale) + 4.0_WP*vale*eps12(el)*eps12(el) + & + 2.0_WP*eps11(el)*eps22(el)*(1.0_WP-vale)) + + ! ======================================= + ! ===== Here the EVP rheology piece starts + ! ======================================= + + ! ===== viscosity zeta should exceed zeta_min + ! (done via limiting delta from above) + + !if(delta>pressure/ice%zeta_min) delta=pressure/ice%zeta_min + !It does not work properly by + !creating response where ice_strength is small + ! Uncomment and test if necessary + + ! ===== if delta is too small or zero, viscosity will too large (unlimited) + ! (limit delta_inv) + delta_inv = 1.0_WP/max(delta,ice%delta_min) + zeta = ice_strength(el)*delta_inv + ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta + ! is too large in some regions and CFL criterion is violated. + ! The regularization below was introduced by Hunke, + ! but seemingly is not used in the current CICE. + ! Without it divergence and zeta can be noisy (but code + ! remains stable), using it reduces viscosities too strongly. + ! 
It is therefore commented + + !if (zeta>ice%clim_evp*voltriangle(el)) then + !zeta=ice%clim_evp*voltriangle(el) + !end if + + zeta = zeta*ice%Tevp_inv + + r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*ice%Tevp_inv + r2 = zeta*(eps11(el)-eps22(el))*vale + r3 = zeta*eps12(el)*vale + + si1 = det1*(sigma11(el) + sigma22(el) + dte*r1) + si2 = det2*(sigma11(el) - sigma22(el) + dte*r2) + + sigma12(el) = det2*(sigma12(el)+dte*r3) + sigma11(el) = 0.5_WP*(si1+si2) + sigma22(el) = 0.5_WP*(si1-si2) + #if defined (__icepack) - rdg_conv_elem(el) = -min((eps11(el)+eps22(el)),0.0_WP) - rdg_shear_elem(el) = 0.5_WP*(delta - abs(eps11(el)+eps22(el))) + rdg_conv_elem(el) = -min((eps11(el)+eps22(el)),0.0_WP) + rdg_shear_elem(el) = 0.5_WP*(delta - abs(eps11(el)+eps22(el))) #endif - - endif - end do - + endif + end do +!$OMP END PARALLEL DO end subroutine stress_tensor -!=================================================================== -subroutine stress_tensor_no1(ice_strength, mesh) -! EVP rheology. The routine computes stress tensor components based on ice -! velocity field. They are stored as elemental arrays (sigma11, sigma22 and -! sigma12). The ocean velocity is at nodal locations. -use o_param -use i_param -use mod_mesh -use i_arrays -use g_parsup -USE g_CONFIG -implicit none - -real(kind=WP), intent(in) :: ice_strength(mydim_elem2D) -real(kind=WP) :: eta, xi, delta, aa -integer :: el, elnodes(3) -real(kind=WP) :: asum, msum, vale, dx(3), dy(3) -real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte -real(kind=WP) :: zeta, delta_inv, d1, d2 - -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - vale = 1.0_WP/(ellipse**2) - - dte = ice_dt/(1.0_WP*evp_rheol_steps) - det1 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) - det2 = 1.0_WP/(1.0_WP + 0.5_WP*Tevp_inv*dte) !*ellipse**2 - - - do el=1,myDim_elem2D - !__________________________________________________________________________ - ! 
if element contains cavity node skip it - !!PS if ( any(ulevels_nod2d(elem2D_nodes(:,el)) > 1) ) cycle - if (ulevels(el) > 1) cycle - ! ===== Check if there is ice on elem - - ! There is no ice in elem - ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE - if (ice_strength(el) > 0.) then - ! ===== - ! ===== Deformation rate tensor on element elem: - !du/dx - - eps11(el) = sum(mesh%gradient_sca(1:3,el)*U_ice(mesh%elem2D_nodes(1:3,el))) & - -mesh% metric_factor(el) * sum(V_ice(mesh%elem2D_nodes(1:3,el)))/3.0_WP - - eps22(el) = sum(mesh%gradient_sca(4:6, el)*V_ice(mesh%elem2D_nodes(1:3,el))) - - eps12(el) = 0.5_WP*(sum(mesh%gradient_sca(4:6,el)*U_ice(mesh%elem2D_nodes(1:3,el))) & - + sum(mesh%gradient_sca(1:3,el)*V_ice(mesh%elem2D_nodes(1:3,el))) & - + mesh%metric_factor(el) * sum(U_ice(mesh%elem2D_nodes(1:3,el)))/3.0_WP) - ! ===== moduli: - delta = sqrt((eps11(el)*eps11(el) + eps22(el)*eps22(el))*(1.0_WP+vale) + 4.0_WP*vale*eps12(el)*eps12(el) + & - 2.0_WP*eps11(el)*eps22(el)*(1.0_WP-vale)) - - ! ======================================= - ! ===== Here the EVP rheology piece starts - ! ======================================= - - ! ===== viscosity zeta should exceed zeta_min - ! (done via limiting delta from above) - - !if(delta>pressure/zeta_min) delta=pressure/zeta_min - !It does not work properly by - !creating response where ice_strength is small - ! Uncomment and test if necessary - - ! ===== if delta is too small or zero, viscosity will too large (unlimited) - ! (limit delta_inv) - delta_inv = 1.0_WP/max(delta,delta_min) - zeta = ice_strength(el)*delta_inv - ! ===== Limiting pressure/Delta (zeta): it may still happen that pressure/Delta - ! is too large in some regions and CFL criterion is violated. - ! The regularization below was introduced by Hunke, - ! but seemingly is not used in the current CICE. - ! Without it divergence and zeta can be noisy (but code - ! remains stable), using it reduces viscosities too strongly. - ! 
It is therefore commented - - !if (zeta>Clim_evp*voltriangle(el)) then - !zeta=Clim_evp*voltriangle(el) - !end if - - zeta = zeta*Tevp_inv - - r1 = zeta*(eps11(el)+eps22(el)) - ice_strength(el)*Tevp_inv - r2 = zeta*(eps11(el)-eps22(el))*vale - r3 = zeta*eps12(el)*vale - - si1 = det1*(sigma11(el) + sigma22(el) + dte*r1) - si2 = det2*(sigma11(el) - sigma22(el) + dte*r2) - - sigma12(el) = det2*(sigma12(el)+dte*r3) - sigma11(el) = 0.5_WP*(si1+si2) - sigma22(el) = 0.5_WP*(si1-si2) - endif - end do -end subroutine stress_tensor_no1 -!=================================================================== -subroutine stress2rhs_e(mesh) +! +! +!_______________________________________________________________________________ ! EVP implementation: ! Computes the divergence of stress tensor and puts the result into the -! rhs vectors. Velocity is at nodes. -! The divergence is computed in a cysly over edges. It is slower that the -! approach in stress2rhs_e inherited from FESOM - - -USE MOD_MESH -USE o_PARAM -USE i_PARAM -USE i_therm_param -USE i_arrays -USE g_PARSUP - - -IMPLICIT NONE -INTEGER :: n, elem, ed, elnodes(3), el(2), ednodes(2) -REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 - -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - DO n=1, myDim_nod2D - U_rhs_ice(n)=0.0_WP - V_rhs_ice(n)=0.0_WP - END DO - - ! Stress divergence - DO ed=1,myDim_edge2D - ednodes=edges(:,ed) - el=edge_tri(:,ed) - if(myList_edge2D(ed)>edge2D_in) cycle - ! 
elements on both sides - uc = - sigma12(el(1))*edge_cross_dxdy(1,ed) + sigma11(el(1))*edge_cross_dxdy(2,ed) & - + sigma12(el(2))*edge_cross_dxdy(3,ed) - sigma11(el(2))*edge_cross_dxdy(4,ed) - - vc = - sigma22(el(1))*edge_cross_dxdy(1,ed) + sigma12(el(1))*edge_cross_dxdy(2,ed) & - + sigma22(el(2))*edge_cross_dxdy(3,ed) - sigma12(el(2))*edge_cross_dxdy(4,ed) - - U_rhs_ice(ednodes(1)) = U_rhs_ice(ednodes(1)) + uc - U_rhs_ice(ednodes(2)) = U_rhs_ice(ednodes(2)) - uc - V_rhs_ice(ednodes(1)) = V_rhs_ice(ednodes(1)) + vc - V_rhs_ice(ednodes(2)) = V_rhs_ice(ednodes(2)) - vc - END DO - - DO n=1, myDim_nod2D +! rhs vectors +subroutine stress2rhs(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE o_PARAM + IMPLICIT NONE + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - ! if cavity node skip it - if ( ulevels_nod2d(n) > 1 ) cycle - + INTEGER :: n, el, k + REAL(kind=WP) :: val3 !___________________________________________________________________________ - mass = area(1,n)*(rhoice*m_ice(n)+rhosno*m_snow(n)) - if(mass > 1.e-3_WP) then - U_rhs_ice(n) = U_rhs_ice(n) / mass - V_rhs_ice(n) = V_rhs_ice(n) / mass - else - U_rhs_ice(n)=0.0_WP - V_rhs_ice(n)=0.0_WP - end if - END DO - ! - ! elevation gradient contribution - ! - do elem=1,myDim_elem2D - !__________________________________________________________________________ - ! 
if element contains cavity node skip it - if (ulevels(elem) > 1) cycle - - !__________________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - uc=elem_area(elem)*g*sum(gradient_sca(1:3,elem)*elevation(elnodes))/3.0_WP - vc=elem_area(elem)*g*sum(gradient_sca(4:6,elem)*elevation(elnodes))/3.0_WP - U_rhs_ice(elnodes)=U_rhs_ice(elnodes) - uc/area(1,elnodes) - V_rhs_ice(elnodes)=V_rhs_ice(elnodes) - vc/area(1,elnodes) - END DO -end subroutine stress2rhs_e -!=================================================================== -subroutine stress2rhs(inv_areamass, ice_strength, mesh) -! EVP implementation: -! Computes the divergence of stress tensor and puts the result into the -! rhs vectors - -USE MOD_MESH -USE o_PARAM -USE i_PARAM -USE i_THERM_PARAM -USE g_PARSUP -USE i_arrays - -IMPLICIT NONE -REAL(kind=WP), intent(in) :: inv_areamass(myDim_nod2D), ice_strength(mydim_elem2D) -INTEGER :: n, el, k -REAL(kind=WP):: val3 -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - -val3=1/3.0_WP - -DO n=1, myDim_nod2D - U_rhs_ice(n)=0.0_WP - V_rhs_ice(n)=0.0_WP -END DO - -do el=1,myDim_elem2D - ! ===== Skip if ice is absent - -! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE - !____________________________________________________________________________ - ! 
if element contains cavity node skip it - !!OS if ( any(ulevels_nod2d(elem2D_nodes(:,el)) > 1) ) cycle - if (ulevels(el) > 1) cycle - - !____________________________________________________________________________ - if (ice_strength(el) > 0._WP) then - -!$IVDEP - DO k=1,3 - - U_rhs_ice(elem2D_nodes(k,el)) = U_rhs_ice(elem2D_nodes(k,el)) & - - elem_area(el) * & - (sigma11(el)*gradient_sca(k,el) + sigma12(el)*gradient_sca(k+3,el) & - +sigma12(el)*val3*metric_factor(el)) !metrics - - V_rhs_ice(elem2D_nodes(k,el)) = V_rhs_ice(elem2D_nodes(k,el)) & - - elem_area(el) * & - (sigma12(el)*gradient_sca(k,el) + sigma22(el)*gradient_sca(k+3,el) & - -sigma11(el)*val3*metric_factor(el)) - END DO - - - endif - end do - - DO n=1, myDim_nod2D - !__________________________________________________________________________ - ! if cavity node skip it - if (ulevels_nod2d(n)>1) cycle - - !__________________________________________________________________________ - if (inv_areamass(n) > 0._WP) then - U_rhs_ice(n) = U_rhs_ice(n)*inv_areamass(n) + rhs_a(n) - V_rhs_ice(n) = V_rhs_ice(n)*inv_areamass(n) + rhs_m(n) - else - U_rhs_ice(n) = 0._WP - V_rhs_ice(n) = 0._WP - endif - END DO + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m + real(kind=WP), dimension(:), pointer :: inv_areamass, ice_strength +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + inv_areamass => ice%work%inv_areamass(:) + ice_strength => ice%work%ice_strength(:) + + !___________________________________________________________________________ + val3=1/3.0_WP + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, el, k) +!$OMP DO + DO n=1, myDim_nod2D + U_rhs_ice(n)=0.0_WP + V_rhs_ice(n)=0.0_WP + END DO +!$OMP END DO +!$OMP DO + do el=1,myDim_elem2D + ! ===== Skip if ice is absent + ! if (any(m_ice(elnodes)<= 0.) .or. any(a_ice(elnodes) <=0.)) CYCLE + !_______________________________________________________________________ + ! 
if element contains cavity node skip it + if (ulevels(el) > 1) cycle + + !_______________________________________________________________________ + if (ice_strength(el) > 0._WP) then + DO k=1,3 +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(elem2D_nodes(k,el))) +#else +!$OMP ORDERED +#endif + U_rhs_ice(elem2D_nodes(k,el)) = U_rhs_ice(elem2D_nodes(k,el)) & + - elem_area(el) * & + (sigma11(el)*gradient_sca(k,el) + sigma12(el)*gradient_sca(k+3,el) & + +sigma12(el)*val3*metric_factor(el)) !metrics + + V_rhs_ice(elem2D_nodes(k,el)) = V_rhs_ice(elem2D_nodes(k,el)) & + - elem_area(el) * & + (sigma12(el)*gradient_sca(k,el) + sigma22(el)*gradient_sca(k+3,el) & + -sigma11(el)*val3*metric_factor(el)) + +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(elem2D_nodes(k,el))) +#else +!$OMP END ORDERED +#endif + END DO + endif + end do +!$OMP END DO +!$OMP DO + DO n=1, myDim_nod2D + !_______________________________________________________________________ + ! if cavity node skip it + if (ulevels_nod2d(n)>1) cycle + + !_______________________________________________________________________ + if (inv_areamass(n) > 0._WP) then + U_rhs_ice(n) = U_rhs_ice(n)*inv_areamass(n) + rhs_a(n) + V_rhs_ice(n) = V_rhs_ice(n)*inv_areamass(n) + rhs_m(n) + else + U_rhs_ice(n) = 0._WP + V_rhs_ice(n) = 0._WP + endif + END DO +!$OMP END DO +!$OMP END PARALLEL end subroutine stress2rhs ! ! -!=================================================================== -subroutine EVPdynamics(mesh) +!_______________________________________________________________________________ ! EVP implementation. Does subcycling and boundary conditions. ! 
Velocities at nodes -USE MOD_MESH -USE o_PARAM -USE i_ARRAYS -USE i_PARAM -USE i_therm_param -USE g_PARSUP -USE o_ARRAYS -USE g_CONFIG -USE g_comm_auto -use ice_EVP_interfaces - +subroutine EVPdynamics(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE o_PARAM + USE o_ARRAYS + USE g_CONFIG + USE g_comm_auto + use ice_EVP_interfaces #if defined (__icepack) - use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength - use icedrv_main, only: icepack_to_fesom + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength + use icedrv_main, only: icepack_to_fesom #endif - -IMPLICIT NONE -integer :: steps, shortstep -real(kind=WP) :: rdt, asum, msum, r_a, r_b -real(kind=WP) :: drag, det, umod, rhsu, rhsv -integer :: n, ed, ednodes(2), el, elnodes(3) -real(kind=WP) :: ax, ay, aa, elevation_dx, elevation_dy - -real(kind=WP) :: inv_areamass(myDim_nod2D), inv_mass(myDim_nod2D) -real(kind=WP) :: ice_strength(myDim_elem2D), elevation_elem(3), p_ice(3) -integer :: use_pice - -real(kind=WP) :: eta, xi, delta -integer :: k -real(kind=WP) :: vale, dx(3), dy(3), val3 -real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte -real(kind=WP) :: zeta, delta_inv, d1, d2 - -INTEGER :: elem -REAL(kind=WP) :: mass, uc, vc, deltaX1, deltaX2, deltaY1, deltaY2 - -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - -! 
If Icepack is used, always update the tracers - + IMPLICIT NONE + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: steps, shortstep + real(kind=WP) :: rdt, asum, msum, r_a, r_b + real(kind=WP) :: drag, det, umod, rhsu, rhsv + integer :: n, ed, ednodes(2), el, elnodes(3) + real(kind=WP) :: ax, ay, aa, elevation_dx, elevation_dy + + real(kind=WP) :: elevation_elem(3), p_ice(3) + integer :: use_pice + + real(kind=WP) :: eta, delta + integer :: k + real(kind=WP) :: vale, dx(3), dy(3), val3 + real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2, dte + real(kind=WP) :: zeta, delta_inv, d1, d2 + INTEGER :: elem + !_______________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: u_ice_old, v_ice_old + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m + real(kind=WP), dimension(:), pointer :: u_w, v_w, elevation + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=WP), dimension(:), pointer :: inv_areamass, inv_mass, ice_strength #if defined (__icepack) - a_ice_old(:) = a_ice(:) - m_ice_old(:) = a_ice(:) - m_snow_old(:) = m_snow(:) - - call icepack_to_fesom (nx_in=(myDim_nod2D+eDim_nod2D), & - aice_out=a_ice, & - vice_out=m_ice, & - vsno_out=m_snow) + real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old #endif - -rdt=ice_dt/(1.0*evp_rheol_steps) -ax=cos(theta_io) -ay=sin(theta_io) + real(kind=WP) , pointer :: inv_rhowat, rhosno, rhoice +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + 
a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + u_ice_old => ice%uice_old(:) + v_ice_old => ice%vice_old(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + elevation => ice%srfoce_ssh(:) + stress_atmice_x => ice%stress_atmice_x(:) + stress_atmice_y => ice%stress_atmice_y(:) +#if defined (__icepack) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) +#endif + rhosno => ice%thermo%rhosno + rhoice => ice%thermo%rhoice + inv_rhowat => ice%thermo%inv_rhowat + + inv_areamass => ice%work%inv_areamass(:) + inv_mass => ice%work%inv_mass(:) + ice_strength => ice%work%ice_strength(:) -! Precompute values that are never changed during the iteration - inv_areamass =0.0_WP - inv_mass =0.0_WP - rhs_a =0.0_WP - rhs_m =0.0_WP - do n=1,myDim_nod2D !___________________________________________________________________________ - ! if cavity node skip it - if (ulevels_nod2d(n)>1) cycle + ! If Icepack is used, always update the tracers +#if defined (__icepack) + a_ice_old(:) = a_ice(:) + m_ice_old(:) = a_ice(:) + m_snow_old(:) = m_snow(:) + call icepack_to_fesom (nx_in=(myDim_nod2D+eDim_nod2D), & + aice_out=a_ice, & + vice_out=m_ice, & + vsno_out=m_snow) +#endif !___________________________________________________________________________ - if ((rhoice*m_ice(n)+rhosno*m_snow(n)) > 1.e-3_WP) then - inv_areamass(n) = 1._WP/(area(1,n)*(rhoice*m_ice(n)+rhosno*m_snow(n))) - else - inv_areamass(n) = 0._WP - endif + rdt=ice%ice_dt/(1.0*ice%evp_rheol_steps) + ax=cos(ice%theta_io) + ay=sin(ice%theta_io) + + !___________________________________________________________________________ + ! 
Precompute values that are never changed during the iteration +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + inv_areamass(n) =0.0_WP + inv_mass(n) =0.0_WP + rhs_a(n) =0.0_WP + rhs_m(n) =0.0_WP + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n) + do n=1,myDim_nod2D + !_______________________________________________________________________ + ! if cavity node skip it + if (ulevels_nod2d(n)>1) cycle - if (a_ice(n) < 0.01_WP) then - ! Skip if ice is absent - inv_mass(n) = 0._WP - else - inv_mass(n) = (rhoice*m_ice(n)+rhosno*m_snow(n))/a_ice(n) - inv_mass(n) = 1.0_WP/max(inv_mass(n), 9.0_WP) ! Limit the mass - ! if it is too small - endif + !_______________________________________________________________________ + if ((rhoice*m_ice(n)+rhosno*m_snow(n)) > 1.e-3_WP) then + inv_areamass(n) = 1._WP/(area(1,n)*(rhoice*m_ice(n)+rhosno*m_snow(n))) + else + inv_areamass(n) = 0._WP + endif + + if (a_ice(n) < 0.01_WP) then + ! Skip if ice is absent + inv_mass(n) = 0._WP + else + inv_mass(n) = (rhoice*m_ice(n)+rhosno*m_snow(n))/a_ice(n) + inv_mass(n) = 1.0_WP/max(inv_mass(n), 9.0_WP) ! Limit the mass + ! if it is too small + endif + rhs_a(n)=0.0_WP ! these are used as temporal storage here + rhs_m(n)=0.0_WP ! for the contribution due to ssh + enddo +!$OMP END PARALLEL DO - rhs_a(n)=0.0_WP ! these are used as temporal storage here - rhs_m(n)=0.0_WP ! for the contribution due to ssh - enddo - -!_______________________________________________________________________________ !!PS -use_pice=0 -if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 -if ( .not. trim(which_ALE)=='linfs') then - ! for full free surface include pressure from ice mass - ice_strength=0.0_WP - do el = 1,myDim_elem2D - - elnodes = elem2D_nodes(:,el) - !_______________________________________________________________________ - ! 
if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle - if (ulevels(el) > 1) cycle - - !_______________________________________________________________________ - if (any(m_ice(elnodes)<=0._WP) .or. & - any(a_ice(elnodes)<=0._WP)) then - - ! There is no ice in elem - ice_strength(el) = 0._WP - - !_______________________________________________________________________ - else - msum = sum(m_ice(elnodes))/3.0_WP - asum = sum(a_ice(elnodes))/3.0_WP - - !___________________________________________________________________ - ! Hunke and Dukowicz c*h*p* + !___________________________________________________________________________ + use_pice=0 + if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 + if ( .not. trim(which_ALE)=='linfs') then + ! for full free surface include pressure from ice mass +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(el, elnodes, msum, asum, aa, p_ice, elevation_elem, elevation_dx, elevation_dy) +!$OMP DO + do el = 1,myDim_elem2D + elnodes = elem2D_nodes(:,el) + ice_strength(el)=0.0_WP + !___________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(el) > 1) cycle + + !___________________________________________________________________ + if (any(m_ice(elnodes)<=0._WP) .or. & + any(a_ice(elnodes)<=0._WP)) then + + ! There is no ice in elem + ice_strength(el) = 0._WP + + !___________________________________________________________________ + else + msum = sum(m_ice(elnodes))/3.0_WP + asum = sum(a_ice(elnodes))/3.0_WP + + !_______________________________________________________________ + ! 
Hunke and Dukowicz c*h*p* #if defined (__icepack) - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) #else - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) #endif - ice_strength(el) = 0.5_WP*ice_strength(el) - - !___________________________________________________________________ - ! use rhs_m and rhs_a for storing the contribution from elevation: - aa = 9.81_WP*elem_area(el)/3.0_WP - - !___________________________________________________________________ - ! add and limit pressure from ice weight in case of floating ice - ! like in FESOM 1.4 - p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat - do n=1,3 - p_ice(n)=min(p_ice(n),max_ice_loading) - end do -!!PS p_ice= 0.0_WP - - !___________________________________________________________________ - elevation_elem = elevation(elnodes) - elevation_dx = sum(gradient_sca(1:3,el)*(elevation_elem+p_ice*use_pice)) - elevation_dy = sum(gradient_sca(4:6,el)*(elevation_elem+p_ice*use_pice)) - - !___________________________________________________________________ - rhs_a(elnodes) = rhs_a(elnodes)-aa*elevation_dx - rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy - end if - enddo -else - ! for linear free surface - ice_strength=0.0_WP - do el = 1,myDim_elem2D - elnodes = elem2D_nodes(:,el) - !_______________________________________________________________________ - ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle - if (ulevels(el) > 1) cycle - - !_______________________________________________________________________ - if (any(m_ice(elnodes) <= 0._WP) .or. & - any(a_ice(elnodes) <=0._WP)) then - - ! There is no ice in elem - ice_strength(el) = 0._WP - else - msum = sum(m_ice(elnodes))/3.0_WP - asum = sum(a_ice(elnodes))/3.0_WP - - ! 
===== Hunke and Dukowicz c*h*p* + ice_strength(el) = 0.5_WP*ice_strength(el) + + !_______________________________________________________________ + ! use rhs_m and rhs_a for storing the contribution from elevation: + aa = 9.81_WP*elem_area(el)/3.0_WP + + !_______________________________________________________________ + ! add and limit pressure from ice weight in case of floating ice + ! like in FESOM 1.4 + p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat + do n=1,3 + p_ice(n)=min(p_ice(n),max_ice_loading) + end do + !!PS p_ice= 0.0_WP + + !_______________________________________________________________ + elevation_elem = elevation(elnodes) + elevation_dx = sum(gradient_sca(1:3,el)*(elevation_elem+p_ice*use_pice)) + elevation_dy = sum(gradient_sca(4:6,el)*(elevation_elem+p_ice*use_pice)) + + !_______________________________________________________________ + rhs_a(elnodes) = rhs_a(elnodes)-aa*elevation_dx + rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy + end if + enddo +!$OMP END DO +!$OMP END PARALLEL + else + ! for linear free surface +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(el, elnodes, msum, asum, aa, elevation_elem, elevation_dx, elevation_dy) + do el = 1,myDim_elem2D + ice_strength(el)=0.0_WP + elnodes = elem2D_nodes(:,el) + !___________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(el) > 1) cycle + + !___________________________________________________________________ + if (any(m_ice(elnodes) <= 0._WP) .or. & + any(a_ice(elnodes) <=0._WP)) then + + ! There is no ice in elem + ice_strength(el) = 0._WP + else + msum = sum(m_ice(elnodes))/3.0_WP + asum = sum(a_ice(elnodes))/3.0_WP + + ! 
===== Hunke and Dukowicz c*h*p* #if defined (__icepack) - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) #else - ice_strength(el) = pstar*msum*exp(-c_pressure*(1.0_WP-asum)) + ice_strength(el) = ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) #endif - ice_strength(el) = 0.5_WP*ice_strength(el) - - ! use rhs_m and rhs_a for storing the contribution from elevation: - aa = 9.81_WP*elem_area(el)/3.0_WP - - elevation_dx = sum(gradient_sca(1:3,el)*elevation(elnodes)) - elevation_dy = sum(gradient_sca(4:6,el)*elevation(elnodes)) - - rhs_a(elnodes) = rhs_a(elnodes)-aa*elevation_dx - rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy - end if - enddo -endif ! --> if ( .not. trim(which_ALE)=='linfs') then - -do n=1,myDim_nod2D - if (ulevels_nod2d(n)>1) cycle + ice_strength(el) = 0.5_WP*ice_strength(el) + + ! use rhs_m and rhs_a for storing the contribution from elevation: + aa = 9.81_WP*elem_area(el)/3.0_WP + + elevation_dx = sum(gradient_sca(1:3,el)*elevation(elnodes)) + elevation_dy = sum(gradient_sca(4:6,el)*elevation(elnodes)) + + rhs_a(elnodes) = rhs_a(elnodes)-aa*elevation_dx + rhs_m(elnodes) = rhs_m(elnodes)-aa*elevation_dy + end if + enddo +!$OMP END PARALLEL DO + endif ! --> if ( .not. trim(which_ALE)=='linfs') then +!$OMP PARALLEL DO !___________________________________________________________________________ - rhs_a(n) = rhs_a(n)/area(1,n) - rhs_m(n) = rhs_m(n)/area(1,n) - enddo -! End of Precomputing - -!============================================================== -! And the ice stepping starts - + do n=1,myDim_nod2D + if (ulevels_nod2d(n)>1) cycle + rhs_a(n) = rhs_a(n)/area(1,n) + rhs_m(n) = rhs_m(n)/area(1,n) + enddo +!$OMP END PARALLEL DO + !___________________________________________________________________________ + ! 
End of Precomputing --> And the ice stepping starts #if defined (__icepack) - rdg_conv_elem(:) = 0.0_WP - rdg_shear_elem(:) = 0.0_WP + rdg_conv_elem(:) = 0.0_WP + rdg_shear_elem(:) = 0.0_WP #endif + do shortstep=1, ice%evp_rheol_steps + !_______________________________________________________________________ + call stress_tensor(ice, partit, mesh) + call stress2rhs(ice, partit, mesh) + !_______________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, ed, umod, drag, rhsu, rhsv, r_a, r_b, det) +!$OMP DO + do n=1,myDim_nod2D+eDim_nod2D + U_ice_old(n) = U_ice(n) !PS + V_ice_old(n) = V_ice(n) !PS + end do +!$OMP END DO +!$OMP DO + do n=1,myDim_nod2D + !___________________________________________________________________ + ! if cavity node skip it + if ( ulevels_nod2d(n)>1 ) cycle + + !___________________________________________________________________ + if (a_ice(n) >= 0.01_WP) then ! Skip if ice is absent + umod = sqrt((U_ice(n)-U_w(n))**2+(V_ice(n)-V_w(n))**2) + drag = ice%cd_oce_ice*umod*density_0*inv_mass(n) + + rhsu = U_ice(n) +rdt*(drag*(ax*U_w(n) - ay*V_w(n))+ & + inv_mass(n)*stress_atmice_x(n) + U_rhs_ice(n)) + rhsv = V_ice(n) +rdt*(drag*(ax*V_w(n) + ay*U_w(n))+ & + inv_mass(n)*stress_atmice_y(n) + V_rhs_ice(n)) + + r_a = 1._WP + ax*drag*rdt + r_b = rdt*(mesh%coriolis_node(n) + ay*drag) + det = 1.0_WP/(r_a*r_a + r_b*r_b) + U_ice(n) = det*(r_a*rhsu +r_b*rhsv) + V_ice(n) = det*(r_a*rhsv -r_b*rhsu) + else ! Set velocities to 0 if ice is absent + U_ice(n) = 0.0_WP + V_ice(n) = 0.0_WP + end if + end do +!$OMP END DO + !_______________________________________________________________________ + ! apply sea ice velocity boundary condition +!$OMP DO + DO ed=1,myDim_edge2D + !___________________________________________________________________ + ! 
apply coastal sea ice velocity boundary conditions + if(myList_edge2D(ed) > edge2D_in) then + U_ice(edges(1:2,ed))=0.0_WP + V_ice(edges(1:2,ed))=0.0_WP + endif + + !___________________________________________________________________ + ! apply sea ice velocity boundary conditions at cavity-ocean edge + if (use_cavity) then + if ( (ulevels(edge_tri(1,ed))>1) .or. & + ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(edges(1,ed))) +#else +!$OMP ORDERED +#endif + U_ice(edges(1,ed))=0.0_WP + V_ice(edges(1,ed))=0.0_WP -do shortstep=1, evp_rheol_steps - - call stress_tensor(ice_strength, mesh) - call stress2rhs(inv_areamass,ice_strength, mesh) - - U_ice_old = U_ice !PS - V_ice_old = V_ice !PS - do n=1,myDim_nod2D - - !_________________________________________________________________________ - ! if cavity ndoe skip it - if ( ulevels_nod2d(n)>1 ) cycle - - !_________________________________________________________________________ - if (a_ice(n) >= 0.01_WP) then ! Skip if ice is absent - - - umod = sqrt((U_ice(n)-U_w(n))**2+(V_ice(n)-V_w(n))**2) - - drag = Cd_oce_ice*umod*density_0*inv_mass(n) - - rhsu = U_ice(n) +rdt*(drag*(ax*U_w(n) - ay*V_w(n))+ & - inv_mass(n)*stress_atmice_x(n) + U_rhs_ice(n)) - - rhsv = V_ice(n) +rdt*(drag*(ax*V_w(n) + ay*U_w(n))+ & - inv_mass(n)*stress_atmice_y(n) + V_rhs_ice(n)) - - r_a = 1._WP + ax*drag*rdt - r_b = rdt*(coriolis_node(n) + ay*drag) - - det = 1.0_WP/(r_a*r_a + r_b*r_b) - - U_ice(n) = det*(r_a*rhsu +r_b*rhsv) - V_ice(n) = det*(r_a*rhsv -r_b*rhsu) - else ! Set velocities to 0 if ice is absent - U_ice(n) = 0.0_WP - V_ice(n) = 0.0_WP - end if - - end do - DO ed=1,myDim_edge2D - ! 
boundary conditions - if(myList_edge2D(ed) > edge2D_in) then - U_ice(edges(1:2,ed))=0.0_WP - V_ice(edges(1:2,ed))=0.0_WP - endif - end do - - call exchange_nod(U_ice,V_ice) -END DO - +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(edges(1,ed))) + call omp_set_lock (partit%plock(edges(2,ed))) +#endif + U_ice(edges(2,ed))=0.0_WP + V_ice(edges(2,ed))=0.0_WP +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(edges(2,ed))) +#else +!$OMP END ORDERED +#endif + end if + end if + end do +!$OMP END DO +!$OMP END PARALLEL +!write(*,*) partit%mype, shortstep, 'CP4' + !_______________________________________________________________________ + call exchange_nod(U_ice,V_ice,partit) +!$OMP BARRIER + END DO !--> do shortstep=1, ice%evp_rheol_steps end subroutine EVPdynamics diff --git a/src/ice_fct.F90 b/src/ice_fct.F90 index dcee2c15c..f6c7230f8 100755 --- a/src/ice_fct.F90 +++ b/src/ice_fct.F90 @@ -1,72 +1,152 @@ module ice_fct_interfaces - interface - subroutine ice_mass_matrix_fill(mesh) - use MOD_MESH - type(t_mesh), intent(in) , target :: mesh - end subroutine + interface + subroutine ice_mass_matrix_fill(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine - subroutine ice_solve_high_order(mesh) - use MOD_MESH - type(t_mesh), intent(in) , target :: mesh - end subroutine + subroutine ice_solve_high_order(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine - subroutine ice_solve_low_order(mesh) - use MOD_MESH - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine ice_fem_fct(tr_array_id, mesh) - use MOD_MESH - 
integer :: tr_array_id - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface + subroutine ice_solve_low_order(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + integer :: tr_array_id + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine ice_TG_rhs_div(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine ice_TG_rhs(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine ice_update_for_div(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + end interface end module - -! +! +! +!_______________________________________________________________________________ ! This file collect subroutines implementing FE-FCT ! advection scheme by Loehner et al. ! There is a tunable parameter ice_gamma_fct. ! Increasing it leads to positivity preserving solution. - +! ! Driving routine is fct_ice_solve. It calles other routines ! that do low-order and figh order solutions and then combine them in a flux ! corrected way. Taylor-Galerkin scheme is used as a high-order one. - +! ! 
The code is adapted from FESOM ! -! ===================================================================== -subroutine ice_TG_rhs(mesh) - use MOD_MESH - use i_Arrays - use i_PARAM - use g_PARSUP - use o_PARAM - USE g_CONFIG - implicit none - real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) - integer :: n, q, row, elem, elnodes(3) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - +! +!_______________________________________________________________________________ +subroutine ice_TG_rhs(ice, partit, mesh) + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_ICE + use o_PARAM + USE g_CONFIG + implicit none + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) + integer :: n, q, row, elem, elnodes(3) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, rhs_temp +#endif +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + rhs_temp => ice%data(4)%values_rhs(:) +#endif + !___________________________________________________________________________ ! 
Taylor-Galerkin (Lax-Wendroff) rhs +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, q, row, elem, elnodes, diff, entries, um, vm, vol, dx, dy) +!$OMP DO DO row=1, myDim_nod2D rhs_m(row)=0._WP rhs_a(row)=0._WP rhs_ms(row)=0._WP -#if defined (__oifs) - ths_temp(row)=0._WP +#if defined (__oifs) || defined (__ifsinterface) + rhs_temp(row)=0._WP #endif /* (__oifs) */ END DO - +!$OMP END DO ! Velocities at nodes +!$OMP DO do elem=1,myDim_elem2D !assembling rhs over elements + elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if cavity element skip it if (ulevels(elem)>1) cycle - elnodes=elem2D_nodes(:,elem) !derivatives dx=gradient_sca(1:3,elem) dy=gradient_sca(4:6,elem) @@ -77,100 +157,61 @@ subroutine ice_TG_rhs(mesh) vm=sum(V_ice(elnodes)) !diffusivity - diff=ice_diff*sqrt(elem_area(elem)/scale_area) + diff=ice%ice_diff*sqrt(elem_area(elem)/scale_area) DO n=1,3 row=elnodes(n) DO q = 1,3 !entries(q)= vol*dt*((dx(n)*um+dy(n)*vm)/3.0_WP - & ! diff*(dx(n)*dx(q)+ dy(n)*dy(q))- & ! 0.5*dt*(um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))) - entries(q)= vol*ice_dt*((dx(n)*(um+u_ice(elnodes(q)))+ & + entries(q)= vol*ice%ice_dt*((dx(n)*(um+u_ice(elnodes(q)))+ & dy(n)*(vm+v_ice(elnodes(q))))/12.0_WP - & diff*(dx(n)*dx(q)+ dy(n)*dy(q))- & - 0.5_WP*ice_dt*(um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0_WP) + 0.5_WP*ice%ice_dt*(um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0_WP) END DO rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes)) rhs_a(row)=rhs_a(row)+sum(entries*a_ice(elnodes)) rhs_ms(row)=rhs_ms(row)+sum(entries*m_snow(elnodes)) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) rhs_temp(row)=rhs_temp(row)+sum(entries*ice_temp(elnodes)) #endif /* (__oifs) */ END DO end do +!$OMP END DO +!$OMP END PARALLEL end subroutine ice_TG_rhs ! -!---------------------------------------------------------------------------- -! 
-subroutine ice_fct_init(mesh) - use o_PARAM - use MOD_MESH - use i_ARRAYS - use g_PARSUP - use ice_fct_interfaces - implicit none - integer :: n_size - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - - n_size=myDim_nod2D+eDim_nod2D - - ! Initialization of arrays necessary to implement FCT algorithm - allocate(m_icel(n_size), a_icel(n_size), m_snowl(n_size)) ! low-order solutions - m_icel=0.0_WP - a_icel=0.0_WP - m_snowl=0.0_WP -#if defined (__oifs) - allocate(m_templ(n_size)) - allocate(dm_temp(n_size)) -#endif /* (__oifs) */ - allocate(icefluxes(myDim_elem2D,3)) - allocate(icepplus(n_size), icepminus(n_size)) - icefluxes = 0.0_WP - icepplus = 0.0_WP - icepminus= 0.0_WP - -#if defined (__oifs) - m_templ=0.0_WP - dm_temp=0.0_WP -#endif /* (__oifs) */ - - allocate(dm_ice(n_size), da_ice(n_size), dm_snow(n_size)) ! increments of high - dm_ice = 0.0_WP ! order solutions - da_ice = 0.0_WP - dm_snow = 0.0_WP - - ! Fill in the mass matrix - call ice_mass_matrix_fill(mesh) - if (mype==0) write(*,*) 'Ice FCT is initialized' -end subroutine ice_fct_init -! -!---------------------------------------------------------------------------- ! -subroutine ice_fct_solve(mesh) - use MOD_MESH +!_______________________________________________________________________________ +subroutine ice_fct_solve(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH use ice_fct_interfaces implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !_____________________________________________________________________________ ! Driving routine - call ice_solve_high_order(mesh) ! uses arrays of low-order solutions as temp + call ice_solve_high_order(ice, partit, mesh) ! uses arrays of low-order solutions as temp ! storage. It should preceed the call of low ! order solution. 
- call ice_solve_low_order(mesh) + call ice_solve_low_order(ice, partit, mesh) - call ice_fem_fct(1, mesh) ! m_ice - call ice_fem_fct(2, mesh) ! a_ice - call ice_fem_fct(3, mesh) ! m_snow -#if defined (__oifs) - call ice_fem_fct(4, mesh) ! ice_temp + call ice_fem_fct(1, ice, partit, mesh) ! m_ice + call ice_fem_fct(2, ice, partit, mesh) ! a_ice + call ice_fem_fct(3, ice, partit, mesh) ! m_snow +#if defined (__oifs) || defined (__ifsinterface) + call ice_fem_fct(4, ice, partit, mesh) ! ice_temp #endif /* (__oifs) */ end subroutine ice_fct_solve ! ! !_______________________________________________________________________________ -subroutine ice_solve_low_order(mesh) +subroutine ice_solve_low_order(ice, partit, mesh) !============================ ! Low-order solution @@ -181,23 +222,51 @@ subroutine ice_solve_low_order(mesh) ! We add diffusive contribution to the rhs. The diffusion operator ! is implemented as the difference between the consistent and lumped mass ! matrices acting on the field from the previous time step. The consistent - ! mass matrix on the lhs is replaced with the lumped one. - - use MOD_MESH - use o_MESH - use i_ARRAYS - use i_PARAM - use g_PARSUP + ! mass matrix on the lhs is replaced with the lumped one. + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH use g_comm_auto implicit none + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ integer :: row, clo, clo2, cn, location(100) real(kind=WP) :: gamma - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - gamma=ice_gamma_fct ! Added diffusivity parameter - ! Adjust it to ensure posivity of solution + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms + real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl + real(kind=WP), dimension(:), pointer :: mass_matrix +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, rhs_temp, m_templ +#endif +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) + a_icel => ice%data(1)%valuesl(:) + m_icel => ice%data(2)%valuesl(:) + m_snowl => ice%data(3)%valuesl(:) + mass_matrix => ice%work%fct_massmatrix(:) +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + rhs_temp => ice%data(4)%values_rhs(:) + m_templ => ice%data(4)%valuesl(:) +#endif + !___________________________________________________________________________ + gamma=ice%ice_gamma_fct ! Added diffusivity parameter + ! Adjust it to ensure posivity of solution +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row, clo, clo2, cn, location) do row=1,myDim_nod2D !_______________________________________________________________________ ! if there is cavity no ice fxt low order @@ -217,145 +286,203 @@ subroutine ice_solve_low_order(mesh) m_snowl(row)=(rhs_ms(row)+gamma*sum(mass_matrix(clo:clo2)* & m_snow(location(1:cn))))/area(1,row) + & (1.0_WP-gamma)*m_snow(row) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) m_templ(row)=(rhs_temp(row)+gamma*sum(mass_matrix(clo:clo2)* & ice_temp(location(1:cn))))/area(1,row) + & (1.0_WP-gamma)*ice_temp(row) #endif /* (__oifs) */ end do - +!$OMP END PARALLEL DO ! 
Low-order solution must be known to neighbours - call exchange_nod(m_icel,a_icel,m_snowl) - -#if defined (__oifs) - call exchange_nod(m_templ) + call exchange_nod(m_icel,a_icel,m_snowl, partit) +#if defined (__oifs) || defined (__ifsinterface) + call exchange_nod(m_templ, partit) #endif /* (__oifs) */ - - +!$OMP BARRIER end subroutine ice_solve_low_order ! ! !_______________________________________________________________________________ -subroutine ice_solve_high_order(mesh) - - use MOD_MESH - use O_MESH - use i_ARRAYS - use g_PARSUP - use o_PARAM - use g_comm_auto - implicit none - ! - integer :: n,i,clo,clo2,cn,location(100),row - real(kind=WP) :: rhs_new - integer :: num_iter_solve=3 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - ! Does Taylor-Galerkin solution - ! - !the first approximation - do row=1,myDim_nod2D - !___________________________________________________________________________ - ! if cavity node skip it - if (ulevels_nod2d(row)>1) cycle - - dm_ice(row)=rhs_m(row)/area(1,row) - da_ice(row)=rhs_a(row)/area(1,row) - dm_snow(row)=rhs_ms(row)/area(1,row) -#if defined (__oifs) - dm_temp(row)=rhs_temp(row)/area(1,row) -#endif /* (__oifs) */ - end do - - call exchange_nod(dm_ice, da_ice, dm_snow) - -#if defined (__oifs) - call exchange_nod(dm_temp) -#endif /* (__oifs) */ - !iterate - do n=1,num_iter_solve-1 - do row=1,myDim_nod2D - !___________________________________________________________________________ +subroutine ice_solve_high_order(ice, partit, mesh) + USE MOD_ICE + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_PARAM + use g_comm_auto + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n,clo,clo2,cn,location(100),row + real(kind=WP) :: rhs_new + integer :: num_iter_solve=3 + 
!___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms + real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl + real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow + real(kind=WP), dimension(:), pointer :: mass_matrix +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: rhs_temp, m_templ, dm_temp +#endif +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) + a_icel => ice%data(1)%valuesl(:) + m_icel => ice%data(2)%valuesl(:) + m_snowl => ice%data(3)%valuesl(:) + da_ice => ice%data(1)%dvalues(:) + dm_ice => ice%data(2)%dvalues(:) + dm_snow => ice%data(3)%dvalues(:) + mass_matrix => ice%work%fct_massmatrix(:) +#if defined (__oifs) || defined (__ifsinterface) + rhs_temp => ice%data(4)%values_rhs(:) + m_templ => ice%data(4)%valuesl(:) + dm_temp => ice%data(4)%dvalues(:) +#endif + !___________________________________________________________________________ + ! Does Taylor-Galerkin solution + ! + !the first approximation +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) + do row=1,myDim_nod2D ! 
if cavity node skip it if (ulevels_nod2d(row)>1) cycle - clo=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1)+1 - clo2=ssh_stiff%rowptr(row+1)-ssh_stiff%rowptr(1) - cn=clo2-clo+1 - location(1:cn)=nn_pos(1:cn,row) - rhs_new=rhs_m(row) - sum(mass_matrix(clo:clo2)*dm_ice(location(1:cn))) - m_icel(row)=dm_ice(row)+rhs_new/area(1,row) - rhs_new=rhs_a(row) - sum(mass_matrix(clo:clo2)*da_ice(location(1:cn))) - a_icel(row)=da_ice(row)+rhs_new/area(1,row) - rhs_new=rhs_ms(row) - sum(mass_matrix(clo:clo2)*dm_snow(location(1:cn))) - m_snowl(row)=dm_snow(row)+rhs_new/area(1,row) -#if defined (__oifs) - rhs_new=rhs_temp(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) - m_templ(row)=dm_temp(row)+rhs_new/area(1,row) + dm_ice(row)=rhs_m(row)/area(1,row) + da_ice(row)=rhs_a(row)/area(1,row) + dm_snow(row)=rhs_ms(row)/area(1,row) +#if defined (__oifs) || defined (__ifsinterface) + dm_temp(row)=rhs_temp(row)/area(1,row) +#endif /* (__oifs) */ + end do +!$OMP END PARALLEL DO + call exchange_nod(dm_ice, da_ice, dm_snow, partit) +#if defined (__oifs) || defined (__ifsinterface) + call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ - end do - do row=1,myDim_nod2D +!$OMP BARRIER + !___________________________________________________________________________ + !iterate + do n=1,num_iter_solve-1 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, clo, clo2, cn, location, row, rhs_new) +!$OMP DO + do row=1,myDim_nod2D + ! 
if cavity node skip it + if (ulevels_nod2d(row)>1) cycle + !___________________________________________________________________ + clo = ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1)+1 + clo2 = ssh_stiff%rowptr(row+1)-ssh_stiff%rowptr(1) + cn = clo2-clo+1 + location(1:cn)=nn_pos(1:cn,row) + !___________________________________________________________________ + rhs_new = rhs_m(row) - sum(mass_matrix(clo:clo2)*dm_ice(location(1:cn))) + m_icel(row) = dm_ice(row)+rhs_new/area(1,row) + rhs_new = rhs_a(row) - sum(mass_matrix(clo:clo2)*da_ice(location(1:cn))) + a_icel(row) = da_ice(row)+rhs_new/area(1,row) + rhs_new = rhs_ms(row) - sum(mass_matrix(clo:clo2)*dm_snow(location(1:cn))) + m_snowl(row)= dm_snow(row)+rhs_new/area(1,row) +#if defined (__oifs) || defined (__ifsinterface) + rhs_new = rhs_temp(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) + m_templ(row)= dm_temp(row)+rhs_new/area(1,row) +#endif /* (__oifs) */ + end do +!$OMP END DO !_______________________________________________________________________ - ! if cavity node skip it - if (ulevels_nod2d(row)>1) cycle - - dm_ice(row)=m_icel(row) - da_ice(row)=a_icel(row) - dm_snow(row)=m_snowl(row) -#if defined (__oifs) - dm_temp(row)=m_templ(row) +!$OMP DO + do row=1,myDim_nod2D + ! if cavity node skip it + if (ulevels_nod2d(row)>1) cycle + dm_ice(row)=m_icel(row) + da_ice(row)=a_icel(row) + dm_snow(row)=m_snowl(row) +#if defined (__oifs) || defined (__ifsinterface) + dm_temp(row)=m_templ(row) #endif /* (__oifs) */ - end do - call exchange_nod(dm_ice, da_ice, dm_snow) - -#if defined (__oifs) - call exchange_nod(dm_temp) + end do +!$OMP END DO +!$OMP END PARALLEL + !_______________________________________________________________________ + call exchange_nod(dm_ice, da_ice, dm_snow, partit) +#if defined (__oifs) || defined (__ifsinterface) + call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ - - end do +!$OMP BARRIER + end do end subroutine ice_solve_high_order ! ! 
!_______________________________________________________________________________ -subroutine ice_fem_fct(tr_array_id, mesh) - ! Flux corrected transport algorithm for tracer advection - ! - ! It is based on Loehner et al. (Finite-element flux-corrected - ! transport (FEM-FCT) for the Euler and Navier-Stokes equation, - ! Int. J. Numer. Meth. Fluids, 7 (1987), 1093--1109) as described by Kuzmin and - ! Turek. (kuzmin@math.uni-dortmund.de) - ! - - use MOD_MESH - use O_MESH - use i_arrays - use i_param +! Flux corrected transport algorithm for tracer advection +! It is based on Loehner et al. (Finite-element flux-corrected +! transport (FEM-FCT) for the Euler and Navier-Stokes equation, +! Int. J. Numer. Meth. Fluids, 7 (1987), 1093--1109) as described by Kuzmin and +! Turek. (kuzmin@math.uni-dortmund.de) +subroutine ice_fem_fct(tr_array_id, ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH use o_PARAM - use g_PARSUP use g_comm_auto implicit none - - integer :: tr_array_id - integer :: icoef(3,3),n,q, elem,elnodes(3),row - real(kind=WP), allocatable, dimension(:) :: tmax, tmin - real(kind=WP) :: vol, flux, ae, gamma - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: tr_array_id + integer :: icoef(3,3), n, q, elem, elnodes(3), row + real(kind=WP) :: vol, flux, ae, gamma + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: a_icel, m_icel, m_snowl + real(kind=WP), dimension(:) , pointer :: da_ice, dm_ice, dm_snow + real(kind=WP), dimension(:) , pointer :: icepplus, icepminus, tmax, tmin + real(kind=WP), dimension(:,:), pointer :: icefluxes +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:) , pointer :: ice_temp, m_templ, dm_temp +#endif +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + a_icel => ice%data(1)%valuesl(:) + m_icel => ice%data(2)%valuesl(:) + m_snowl => ice%data(3)%valuesl(:) + da_ice => ice%data(1)%dvalues(:) + dm_ice => ice%data(2)%dvalues(:) + dm_snow => ice%data(3)%dvalues(:) + icefluxes => ice%work%fct_fluxes(:,:) + icepplus => ice%work%fct_plus(:) + icepminus => ice%work%fct_minus(:) + tmax => ice%work%fct_tmax(:) + tmin => ice%work%fct_tmin(:) +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + m_templ => ice%data(4)%valuesl(:) + dm_temp => ice%data(4)%dvalues(:) +#endif + !___________________________________________________________________________ + ! It should coinside with gamma in ts_solve_low_order + gamma=ice%ice_gamma_fct - gamma=ice_gamma_fct ! It should coinside with gamma in - ! ts_solve_low_order - - !========================== + !___________________________________________________________________________ ! Compute elemental antidiffusive fluxes to nodes - !========================== ! This is the most unpleasant part --- ! it takes memory and time. For every element ! we need its antidiffusive contribution to ! 
each of its 3 nodes - - allocate(tmax(myDim_nod2D), tmin(myDim_nod2D)) tmax = 0.0_WP tmin = 0.0_WP @@ -365,14 +492,15 @@ subroutine ice_fem_fct(tr_array_id, mesh) ! Cycle over rows row=elnodes(n) icoef(n,n)=-2 end do - + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, q, elem, elnodes, row, vol, flux, ae) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if cavity cycle over - !!PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ @@ -398,7 +526,7 @@ subroutine ice_fem_fct(tr_array_id, mesh) end do end if -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) if (tr_array_id==4) then do q=1,3 icefluxes(elem,q)=-sum(icoef(:,q)*(gamma*ice_temp(elnodes) + & @@ -407,54 +535,63 @@ subroutine ice_fem_fct(tr_array_id, mesh) end if #endif /* (__oifs) */ end do - - !========================== +!$OMP END DO + !___________________________________________________________________________ ! Screening the low-order solution - !========================== ! TO BE ADDED IF FOUND NECESSARY ! Screening means comparing low-order solutions with the ! solution on the previous time step and using whichever ! is greater/smaller in computations of max/min below - !========================== + !___________________________________________________________________________ ! Cluster min/max - !========================== if (tr_array_id==1) then +!$OMP DO do row=1, myDim_nod2D + if (ulevels_nod2d(row)>1) cycle n=nn_num(row) tmax(row)=maxval(m_icel(nn_pos(1:n,row))) tmin(row)=minval(m_icel(nn_pos(1:n,row))) - ! Admissible increments + ! 
Admissible increments tmax(row)=tmax(row)-m_icel(row) tmin(row)=tmin(row)-m_icel(row) end do +!$OMP END DO end if if (tr_array_id==2) then +!$OMP DO do row=1, myDim_nod2D + if (ulevels_nod2d(row)>1) cycle n=nn_num(row) tmax(row)=maxval(a_icel(nn_pos(1:n,row))) tmin(row)=minval(a_icel(nn_pos(1:n,row))) - ! Admissible increments + ! Admissible increments tmax(row)=tmax(row)-a_icel(row) tmin(row)=tmin(row)-a_icel(row) end do +!$OMP END DO end if if (tr_array_id==3) then +!$OMP DO do row=1, myDim_nod2D + if (ulevels_nod2d(row)>1) cycle n=nn_num(row) tmax(row)=maxval(m_snowl(nn_pos(1:n,row))) tmin(row)=minval(m_snowl(nn_pos(1:n,row))) - ! Admissible increments + ! Admissible increments tmax(row)=tmax(row)-m_snowl(row) tmin(row)=tmin(row)-m_snowl(row) end do +!$OMP END DO end if -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) if (tr_array_id==4) then +!$OMP DO do row=1, myDim_nod2D + if (ulevels_nod2d(row)>1) cycle n=nn_num(row) tmax(row)=maxval(m_templ(nn_pos(1:n,row))) tmin(row)=minval(m_templ(nn_pos(1:n,row))) @@ -462,40 +599,50 @@ subroutine ice_fem_fct(tr_array_id, mesh) tmax(row)=tmax(row)-m_templ(row) tmin(row)=tmin(row)-m_templ(row) end do +!$OMP END DO end if #endif /* (__oifs) */ - !========================= + !___________________________________________________________________________ ! Sums of positive/negative fluxes to node row - !========================= - icepplus=0._WP - icepminus=0._WP +!$OMP DO + do n=1, myDim_nod2D+eDim_nod2D + icepplus (n)=0._WP + icepminus(n)=0._WP + end do +!$OMP END DO +!$OMP DO do elem=1, myDim_elem2D - !_______________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - - !_______________________________________________________________________ ! 
if cavity cycle over - !!PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ + elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) flux=icefluxes(elem,q) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(n)) +#else +!$OMP ORDERED +#endif if (flux>0) then icepplus(n)=icepplus(n)+flux else - icepminus(n)=icepminus(n)+flux + icepminus(n)=icepminus(n)+flux end if +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(n)) +#else +!$OMP END ORDERED +#endif end do end do - - !======================== +!$OMP END DO + !___________________________________________________________________________ ! The least upper bound for the correction factors - !======================== +!$OMP DO do n=1,myDim_nod2D - !_______________________________________________________________________ ! if cavity cycle over if(ulevels_nod2D(n)>1) cycle !LK89140 @@ -513,22 +660,21 @@ subroutine ice_fem_fct(tr_array_id, mesh) icepminus(n)=0._WP end if end do +!$OMP END DO ! pminus and pplus are to be known to neighbouting PE - call exchange_nod(icepminus, icepplus) - - !======================== +!$OMP MASTER + call exchange_nod(icepminus, icepplus, partit) +!$OMP END MASTER +!$OMP BARRIER + !___________________________________________________________________________ ! Limiting - !======================== +!$OMP DO do elem=1, myDim_elem2D - !_______________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - - !_______________________________________________________________________ ! 
if cavity cycle over - !!PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 !_______________________________________________________________________ + elnodes=elem2D_nodes(:,elem) ae=1.0_WP do q=1,3 n=elnodes(q) @@ -538,150 +684,215 @@ subroutine ice_fem_fct(tr_array_id, mesh) end do icefluxes(elem,:)=ae*icefluxes(elem,:) end do - - !========================== +!$OMP END DO + !___________________________________________________________________________ ! Update the solution - !========================== if(tr_array_id==1) then +!$OMP DO do n=1,myDim_nod2D if(ulevels_nod2D(n)>1) cycle !LK89140 m_ice(n)=m_icel(n) - end do + end do +!$OMP END DO +!$OMP DO do elem=1, myDim_elem2D - !___________________________________________________________________ ! if cavity cycle over - !PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 elnodes=elem2D_nodes(:,elem) do q=1,3 n=elnodes(q) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(n)) +#else +!$OMP ORDERED +#endif m_ice(n)=m_ice(n)+icefluxes(elem,q) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(n)) +#else +!$OMP END ORDERED +#endif end do - end do + end do +!$OMP END DO end if if(tr_array_id==2) then +!$OMP DO do n=1,myDim_nod2D if(ulevels_nod2D(n)>1) cycle !LK89140 a_ice(n)=a_icel(n) - end do + end do +!$OMP END DO +!$OMP DO do elem=1, myDim_elem2D - !___________________________________________________________________ ! 
if cavity cycle over - !!PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 elnodes=elem2D_nodes(:,elem) do q=1,3 - n=elnodes(q) + n=elnodes(q) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(n)) +#else +!$OMP ORDERED +#endif a_ice(n)=a_ice(n)+icefluxes(elem,q) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(n)) +#else +!$OMP END ORDERED +#endif end do - end do + end do +!$OMP END DO end if if(tr_array_id==3) then +!$OMP DO do n=1,myDim_nod2D if(ulevels_nod2D(n)>1) cycle !LK89140 m_snow(n)=m_snowl(n) - end do + end do +!$OMP END DO +!$OMP DO do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - !___________________________________________________________________ ! if cavity cycle over - !!PS if(any(ulevels_nod2D(elnodes)>1)) cycle !LK89140 if(ulevels(elem)>1) cycle !LK89140 + elnodes=elem2D_nodes(:,elem) do q=1,3 - n=elnodes(q) - m_snow(n)=m_snow(n)+icefluxes(elem,q) + n=elnodes(q) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(n)) +#else +!$OMP ORDERED +#endif + m_snow(n)=m_snow(n)+icefluxes(elem,q) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(n)) +#else +!$OMP END ORDERED +#endif end do - end do + end do +!$OMP END DO end if -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) if(tr_array_id==4) then +!$OMP DO do n=1,myDim_nod2D + if(ulevels_nod2D(n)>1) cycle !LK89140 ice_temp(n)=m_templ(n) end do +!$OMP END DO +!$OMP DO do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - !___________________________________________________________________ ! 
if cavity cycle over
        if(ulevels(elem)>1) cycle !LK89140
+            elnodes=elem2D_nodes(:,elem)
            do q=1,3
-                n=elnodes(q)
-                ice_temp(n)=ice_temp(n)+icefluxes(elem,q)
+                n=elnodes(q)
+#if defined(_OPENMP) && !defined(__openmp_reproducible)
+                call omp_set_lock  (partit%plock(n))
+#else
+!$OMP ORDERED
+#endif
+                ice_temp(n)=ice_temp(n)+icefluxes(elem,q)
+#if defined(_OPENMP) && !defined(__openmp_reproducible)
+                call omp_unset_lock(partit%plock(n))
+#else
+!$OMP END ORDERED
+#endif
            end do
        end do
+!$OMP END DO
    end if
+#endif /* (__oifs) || (__ifsinterface) */
+!$OMP END PARALLEL
    call exchange_nod(m_ice, a_ice, m_snow, partit)
+#if defined (__oifs) || defined (__ifsinterface)
    call exchange_nod(ice_temp, partit)
#endif /* (__oifs) */
-
-    call exchange_nod(m_ice, a_ice, m_snow)
-
-#if defined (__oifs)
-    call exchange_nod(ice_temp)
-#endif /* (__oifs) */
-
-    deallocate(tmin, tmax)
+!$OMP BARRIER
end subroutine ice_fem_fct
!
!
!_______________________________________________________________________________
-SUBROUTINE ice_mass_matrix_fill(mesh)  ! Used in ice_fct inherited from FESOM
-  use MOD_MESH
-  use O_MESH
-  use i_PARAM
-  use i_ARRAYS
-  use g_PARSUP
-  !
-  implicit none
-  integer                       :: n, n1, n2, row
-
-  integer                       :: elem, elnodes(3), q, offset, col, ipos
-  integer, allocatable          :: col_pos(:)
-  real(kind=WP)                 :: aa
-  integer                       :: flag=0,iflag=0
-  type(t_mesh), intent(in) , target :: mesh
-
-#include "associate_mesh.h"
+SUBROUTINE ice_mass_matrix_fill(ice, partit, mesh)
+    USE MOD_ICE
+    USE MOD_PARTIT
+    USE MOD_PARSUP
+    USE MOD_MESH
+    implicit none
+    type(t_ice)   , intent(inout), target :: ice
+    type(t_partit), intent(inout), target :: partit
+    type(t_mesh)  , intent(in)   , target :: mesh
+    !___________________________________________________________________________
+    integer                  :: n, k, row
+    integer                  :: elem, elnodes(3), q, offset, ipos
+    real(kind=WP)            :: aa
+    integer                  :: flag=0, iflag=0
+    !___________________________________________________________________________
+    ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: mass_matrix +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + mass_matrix => ice%work%fct_massmatrix(:) ! ! a) - allocate(mass_matrix(sum(nn_num(1:myDim_nod2D)))) - mass_matrix =0.0_WP - allocate(col_pos(myDim_nod2D+eDim_nod2D)) - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, k, row, elem, elnodes, q, offset, ipos, aa) +!$OMP DO DO elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) - !___________________________________________________________________ - ! if cavity cycle over - if(ulevels(elem)>1) cycle - + !_______________________________________________________________________ do n=1,3 row=elnodes(n) if(row>myDim_nod2D) cycle + !___________________________________________________________________ ! Global-to-local neighbourhood correspondence - DO q=1,nn_num(row) - col_pos(nn_pos(q,row))=q - END DO + ! we have to modify col_pos construction for OMP compatibility. The MPI version might become a bit slower :( + ! loop over number of neghbouring nodes of node-row offset=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1) - DO q=1,3 - col=elnodes(q) - ipos=offset+col_pos(col) - mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP - if(q==n) then - mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP - end if - END DO + do q=1, 3 + !_______________________________________________________________ + ! if element is cavity cycle over + if(ulevels(elem)>1) cycle + do k=1, nn_num(row) + if (nn_pos(k,row)==elnodes(q)) then + ipos=offset+k + exit + end if + if (k==nn_num(row)) write(*,*) 'FATAL ERROR' + end do +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(row)) ! 
it shall be sufficient to block writing into the same row of SSH_stiff +#else +!$OMP ORDERED +#endif + mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP + if(q==n) then + mass_matrix(ipos)=mass_matrix(ipos)+elem_area(elem)/12.0_WP + end if +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(row)) +#else +!$OMP END ORDERED +#endif + END DO end do END DO - +!$OMP END DO ! TEST: area==sum of row entries in mass_matrix: +!$OMP DO DO q=1,myDim_nod2D - !___________________________________________________________________ ! if cavity cycle over if(ulevels_nod2d(q)>1) cycle @@ -689,200 +900,306 @@ SUBROUTINE ice_mass_matrix_fill(mesh) offset=ssh_stiff%rowptr(q)-ssh_stiff%rowptr(1)+1 n=ssh_stiff%rowptr(q+1)-ssh_stiff%rowptr(1) aa=sum(mass_matrix(offset:n)) - if(abs(area(1,q)-aa)>.1_WP) then + !!PS if(abs(area(1,q)-aa)>.1_WP) then + if(abs(area(ulevels_nod2d(q),q)-aa)>.1_WP) then +!$OMP CRITICAL iflag=q flag=1 +!$OMP END CRITICAL endif END DO +!$OMP END DO +!$OMP END PARALLEL if(flag>0) then offset=ssh_stiff%rowptr(iflag)-ssh_stiff%rowptr(1)+1 n=ssh_stiff%rowptr(iflag+1)-ssh_stiff%rowptr(1) - aa=sum(mass_matrix(offset:n)) - write(*,*) '#### MASS MATRIX PROBLEM', mype, iflag, aa, area(1,iflag) +#if !defined(__openmp_reproducible) + aa=0 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:aa) +!$OMP DO + do row=offset, n + aa=aa+mass_matrix(row) + end do +!$OMP END DO +!$OMP END PARALLEL +#else + aa = sum(mass_matrix(offset:n)) +#endif + write(*,*) '#### MASS MATRIX PROBLEM', mype, iflag, aa, area(1,iflag), ulevels_nod2D(iflag) endif - deallocate(col_pos) END SUBROUTINE ice_mass_matrix_fill ! -!========================================================= ! 
-subroutine ice_TG_rhs_div(mesh) - use MOD_MESH - use i_Arrays - use i_PARAM - use g_PARSUP - use o_PARAM - USE g_CONFIG - implicit none - real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) - integer :: n, q, row, elem, elnodes(3) - real(kind=WP) :: c1, c2, c3, c4, cx1, cx2, cx3, cx4, entries2(3) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! Computes the rhs in a Taylor-Galerkin way (with upwind type of - ! correction for the advection operator) - ! In this version I tr to split divergent term off, so that FCT works without it. - - DO row=1, myDim_nod2D - !! row=myList_nod2D(m) - rhs_m(row)=0.0_WP - rhs_a(row)=0.0_WP - rhs_ms(row)=0.0_WP -#if defined (__oifs) - rhs_temp(row)=0.0_WP +!_______________________________________________________________________________ +subroutine ice_TG_rhs_div(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_PARAM + USE g_CONFIG + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + real(kind=WP) :: diff, entries(3), um, vm, vol, dx(3), dy(3) + integer :: n, q, row, elem, elnodes(3) + real(kind=WP) :: c1, c2, c3, c4, cx1, cx2, cx3, cx4, entries2(3) + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m, rhs_ms + real(kind=WP), dimension(:), pointer :: rhs_adiv, rhs_mdiv, rhs_msdiv +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, rhs_temp, rhs_tempdiv +#endif +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhs_ms => ice%data(3)%values_rhs(:) + rhs_adiv => ice%data(1)%values_div_rhs(:) + rhs_mdiv => ice%data(2)%values_div_rhs(:) + rhs_msdiv => ice%data(3)%values_div_rhs(:) +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + rhs_temp => ice%data(4)%values_rhs(:) + rhs_tempdiv => ice%data(4)%values_div_rhs(:) +#endif + !___________________________________________________________________________ + ! Computes the rhs in a Taylor-Galerkin way (with upwind type of + ! correction for the advection operator) + ! In this version I tr to split divergent term off, so that FCT works without it. + do row=1, myDim_nod2D + !! 
row=myList_nod2D(m) + rhs_m(row)=0.0_WP + rhs_a(row)=0.0_WP + rhs_ms(row)=0.0_WP +#if defined (__oifs) || defined (__ifsinterface) + rhs_temp(row)=0.0_WP #endif /* (__oifs) */ - rhs_mdiv(row)=0.0_WP - rhs_adiv(row)=0.0_WP - rhs_msdiv(row)=0.0_WP -#if defined (__oifs) - rhs_tempdiv(row)=0.0_WP + rhs_mdiv(row)=0.0_WP + rhs_adiv(row)=0.0_WP + rhs_msdiv(row)=0.0_WP +#if defined (__oifs) || defined (__ifsinterface) + rhs_tempdiv(row)=0.0_WP #endif /* (__oifs) */ - END DO - do elem=1,myDim_elem2D !assembling rhs over elements - !___________________________________________________________________________ - ! if cavity element skip it - if (ulevels(elem)>1) cycle - - !! elem=myList_elem2D(m) - elnodes=elem2D_nodes(:,elem) - !derivatives - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - vol=elem_area(elem) - um=sum(u_ice(elnodes)) - vm=sum(v_ice(elnodes)) - ! this is exact computation (no assumption of u=const on elements used - ! in the standard version) - c1=(um*um+sum(u_ice(elnodes)*u_ice(elnodes)))/12.0_WP - c2=(vm*vm+sum(v_ice(elnodes)*v_ice(elnodes)))/12.0_WP - c3=(um*vm+sum(v_ice(elnodes)*u_ice(elnodes)))/12.0_WP - c4=sum(dx*u_ice(elnodes)+dy*v_ice(elnodes)) - DO n=1,3 - row=elnodes(n) - DO q = 1,3 - entries(q)= vol*ice_dt*((1.0_WP-0.5_WP*ice_dt*c4)*(dx(n)*(um+u_ice(elnodes(q)))+ & - dy(n)*(vm+v_ice(elnodes(q))))/12.0_WP - & - 0.5_WP*ice_dt*(c1*dx(n)*dx(q)+c2*dy(n)*dy(q)+c3*(dx(n)*dy(q)+dx(q)*dy(n)))) - !um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0) - entries2(q)=0.5_WP*ice_dt*(dx(n)*(um+u_ice(elnodes(q)))+ & - dy(n)*(vm+v_ice(elnodes(q)))-dx(q)*(um+u_ice(row))- & - dy(q)*(vm+v_ice(row))) - END DO - cx1=vol*ice_dt*c4*(sum(m_ice(elnodes))+m_ice(elnodes(n))+sum(entries2*m_ice(elnodes)))/12.0_WP - cx2=vol*ice_dt*c4*(sum(a_ice(elnodes))+a_ice(elnodes(n))+sum(entries2*a_ice(elnodes)))/12.0_WP - cx3=vol*ice_dt*c4*(sum(m_snow(elnodes))+m_snow(elnodes(n))+sum(entries2*m_snow(elnodes)))/12.0_WP -#if defined (__oifs) - 
cx4=vol*ice_dt*c4*(sum(ice_temp(elnodes))+ice_temp(elnodes(n))+sum(entries2*ice_temp(elnodes)))/12.0_WP + end do +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(diff, entries, um, vm, vol, dx, dy, n, q, row, elem, elnodes, c1, c2, c3, c4, cx1, cx2, cx3, cx4, entries2) +!$OMP DO + do elem=1,myDim_elem2D !assembling rhs over elements + elnodes=elem2D_nodes(:,elem) + + ! if cavity element skip it + if (ulevels(elem)>1) cycle + + !derivatives + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + vol=elem_area(elem) + um=sum(u_ice(elnodes)) + vm=sum(v_ice(elnodes)) + ! this is exact computation (no assumption of u=const on elements used + ! in the standard version) + c1=(um*um+sum(u_ice(elnodes)*u_ice(elnodes)))/12.0_WP + c2=(vm*vm+sum(v_ice(elnodes)*v_ice(elnodes)))/12.0_WP + c3=(um*vm+sum(v_ice(elnodes)*u_ice(elnodes)))/12.0_WP + c4=sum(dx*u_ice(elnodes)+dy*v_ice(elnodes)) + do n=1,3 + row=elnodes(n) + !!PS if(ulevels_nod2D(row)>1) cycle !LK89140 + do q = 1,3 + entries(q)= vol*ice%ice_dt*((1.0_WP-0.5_WP*ice%ice_dt*c4)*(dx(n)*(um+u_ice(elnodes(q)))+ & + dy(n)*(vm+v_ice(elnodes(q))))/12.0_WP - & + 0.5_WP*ice%ice_dt*(c1*dx(n)*dx(q)+c2*dy(n)*dy(q)+c3*(dx(n)*dy(q)+dx(q)*dy(n)))) + !um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0) + entries2(q)=0.5_WP*ice%ice_dt*(dx(n)*(um+u_ice(elnodes(q)))+ & + dy(n)*(vm+v_ice(elnodes(q)))-dx(q)*(um+u_ice(row))- & + dy(q)*(vm+v_ice(row))) + end do + + !___________________________________________________________________ + cx1=vol*ice%ice_dt*c4*(sum(m_ice(elnodes))+m_ice(elnodes(n))+sum(entries2*m_ice(elnodes)))/12.0_WP + cx2=vol*ice%ice_dt*c4*(sum(a_ice(elnodes))+a_ice(elnodes(n))+sum(entries2*a_ice(elnodes)))/12.0_WP + cx3=vol*ice%ice_dt*c4*(sum(m_snow(elnodes))+m_snow(elnodes(n))+sum(entries2*m_snow(elnodes)))/12.0_WP +#if defined (__oifs) || defined (__ifsinterface) + cx4=vol*ice%ice_dt*c4*(sum(ice_temp(elnodes))+ice_temp(elnodes(n))+sum(entries2*ice_temp(elnodes)))/12.0_WP #endif /* (__oifs) */ - 
rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes))+cx1 - rhs_a(row)=rhs_a(row)+sum(entries*a_ice(elnodes))+cx2 - rhs_ms(row)=rhs_ms(row)+sum(entries*m_snow(elnodes))+cx3 -#if defined (__oifs) - rhs_temp(row)=rhs_temp(row)+sum(entries*ice_temp(elnodes))+cx4 + !___________________________________________________________________ +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(row)) +#else +!$OMP ORDERED +#endif + rhs_m(row)=rhs_m(row)+sum(entries*m_ice(elnodes))+cx1 + rhs_a(row)=rhs_a(row)+sum(entries*a_ice(elnodes))+cx2 + rhs_ms(row)=rhs_ms(row)+sum(entries*m_snow(elnodes))+cx3 +#if defined (__oifs) || defined (__ifsinterface) + rhs_temp(row)=rhs_temp(row)+sum(entries*ice_temp(elnodes))+cx4 #endif /* (__oifs) */ - rhs_mdiv(row)=rhs_mdiv(row)-cx1 - rhs_adiv(row)=rhs_adiv(row)-cx2 - rhs_msdiv(row)=rhs_msdiv(row)-cx3 -#if defined (__oifs) - rhs_tempdiv(row)=rhs_tempdiv(row)-cx4 + !___________________________________________________________________ + rhs_mdiv(row)=rhs_mdiv(row)-cx1 + rhs_adiv(row)=rhs_adiv(row)-cx2 + rhs_msdiv(row)=rhs_msdiv(row)-cx3 +#if defined (__oifs) || defined (__ifsinterface) + rhs_tempdiv(row)=rhs_tempdiv(row)-cx4 #endif /* (__oifs) */ - - END DO - end do +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(row)) +#else +!$OMP END ORDERED +#endif + end do + end do +!$OMP END DO +!$OMP END PARALLEL end subroutine ice_TG_rhs_div ! ! !_______________________________________________________________________________ -subroutine ice_update_for_div(mesh) - use MOD_MESH - use O_MESH - use i_Arrays - use i_PARAM - use g_PARSUP +subroutine ice_update_for_div(ice, partit, mesh) + use MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH use o_PARAM - USE g_CONFIG + use g_CONFIG use g_comm_auto implicit none - ! 
- integer :: n,i,clo,clo2,cn,location(100),row + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n,clo,clo2,cn,location(100),row real(kind=WP) :: rhs_new integer :: num_iter_solve=3 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: rhs_adiv, rhs_mdiv, rhs_msdiv + real(kind=WP), dimension(:), pointer :: a_icel, m_icel, m_snowl + real(kind=WP), dimension(:), pointer :: da_ice, dm_ice, dm_snow + real(kind=WP), dimension(:), pointer :: mass_matrix +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, m_templ, dm_temp, rhs_tempdiv +#endif +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + rhs_adiv => ice%data(1)%values_div_rhs(:) + rhs_mdiv => ice%data(2)%values_div_rhs(:) + rhs_msdiv => ice%data(3)%values_div_rhs(:) + a_icel => ice%data(1)%valuesl(:) + m_icel => ice%data(2)%valuesl(:) + m_snowl => ice%data(3)%valuesl(:) + da_ice => ice%data(1)%dvalues(:) + dm_ice => ice%data(2)%dvalues(:) + dm_snow => ice%data(3)%dvalues(:) + mass_matrix => ice%work%fct_massmatrix(:) +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + m_templ => ice%data(4)%valuesl(:) + dm_temp => ice%data(4)%dvalues(:) + rhs_tempdiv => ice%data(4)%values_div_rhs(:) +#endif + !___________________________________________________________________________ ! Does Taylor-Galerkin solution - ! - !the first approximation + ! 
the first approximation +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) do row=1,myDim_nod2D - !! row=myList_nod2D(m) - !___________________________________________________________________________ + !! row=myList_nod2D(m) ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle + dm_ice(row) =rhs_mdiv(row) /area(1,row) da_ice(row) =rhs_adiv(row) /area(1,row) dm_snow(row)=rhs_msdiv(row)/area(1,row) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) dm_temp(row)=rhs_tempdiv(row)/area(1,row) #endif /* (__oifs) */ end do - call exchange_nod(dm_ice) - call exchange_nod(da_ice) - call exchange_nod(dm_snow) -#if defined (__oifs) - call exchange_nod(dm_temp) +!$OMP END PARALLEL DO + call exchange_nod(dm_ice, partit) + call exchange_nod(da_ice, partit) + call exchange_nod(dm_snow, partit) +#if defined (__oifs) || defined (__ifsinterface) + call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ - +!$OMP BARRIER + !___________________________________________________________________________ !iterate do n=1,num_iter_solve-1 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(row, n, clo, clo2, cn, location, rhs_new) +!$OMP DO do row=1,myDim_nod2D - !___________________________________________________________________________ ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle !! 
row=myList_nod2D(m) + !___________________________________________________________________ clo=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1)+1 clo2=ssh_stiff%rowptr(row+1)-ssh_stiff%rowptr(1) cn=clo2-clo+1 location(1:cn)=nn_pos(1:cn, row) - rhs_new=rhs_mdiv(row) - sum(mass_matrix(clo:clo2)*dm_ice(location(1:cn))) - m_icel(row)=dm_ice(row)+rhs_new/area(1,row) - rhs_new=rhs_adiv(row) - sum(mass_matrix(clo:clo2)*da_ice(location(1:cn))) - a_icel(row)=da_ice(row)+rhs_new/area(1,row) - rhs_new=rhs_msdiv(row) - sum(mass_matrix(clo:clo2)*dm_snow(location(1:cn))) - m_snowl(row)=dm_snow(row)+rhs_new/area(1,row) -#if defined (__oifs) - rhs_new=rhs_tempdiv(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) - m_templ(row)=dm_temp(row)+rhs_new/area(1,row) + + !___________________________________________________________________ + rhs_new = rhs_mdiv(row) - sum(mass_matrix(clo:clo2)*dm_ice(location(1:cn))) + m_icel(row) = dm_ice(row)+rhs_new/area(1,row) + rhs_new = rhs_adiv(row) - sum(mass_matrix(clo:clo2)*da_ice(location(1:cn))) + a_icel(row) = da_ice(row)+rhs_new/area(1,row) + rhs_new = rhs_msdiv(row) - sum(mass_matrix(clo:clo2)*dm_snow(location(1:cn))) + m_snowl(row)= dm_snow(row)+rhs_new/area(1,row) +#if defined (__oifs) || defined (__ifsinterface) + rhs_new = rhs_tempdiv(row) - sum(mass_matrix(clo:clo2)*dm_temp(location(1:cn))) + m_templ(row)= dm_temp(row)+rhs_new/area(1,row) #endif /* (__oifs) */ end do +!$OMP END DO +!$OMP DO do row=1,myDim_nod2D - !___________________________________________________________________________ ! if cavity node skip it if (ulevels_nod2d(row)>1) cycle - !! 
row=myList_nod2D(m) - dm_ice(row)=m_icel(row) - da_ice(row)=a_icel(row) - dm_snow(row)=m_snowl(row) -#if defined (__oifs) - dm_temp(row)=m_templ(row) + dm_ice(row) = m_icel(row) + da_ice(row) = a_icel(row) + dm_snow(row) = m_snowl(row) +#if defined (__oifs) || defined (__ifsinterface) + dm_temp(row) = m_templ(row) #endif /* (__oifs) */ end do - call exchange_nod(dm_ice) - call exchange_nod(da_ice) - call exchange_nod(dm_snow) -#if defined (__oifs) - call exchange_nod(dm_temp) +!$OMP END DO +!$OMP END PARALLEL + call exchange_nod(dm_ice, partit) + call exchange_nod(da_ice, partit) + call exchange_nod(dm_snow, partit) +#if defined (__oifs) || defined (__ifsinterface) + call exchange_nod(dm_temp, partit) #endif /* (__oifs) */ +!$OMP BARRIER end do - m_ice=m_ice+dm_ice - a_ice=a_ice+da_ice - m_snow=m_snow+dm_snow -#if defined (__oifs) - ice_temp=ice_temp+dm_temp -#endif /* (__oifs) */ +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) + do row=1, myDim_nod2D+eDim_nod2D + m_ice(row) = m_ice (row)+dm_ice (row) + a_ice(row) = a_ice (row)+da_ice (row) + m_snow(row) = m_snow(row)+dm_snow(row) +#if defined (__oifs) || defined (__ifsinterface) + ice_temp(row)= ice_temp(row)+dm_temp(row) +#endif /* (__oifs) */ + end do +!$OMP END PARALLEL DO end subroutine ice_update_for_div ! 
============================================================= diff --git a/src/ice_maEVP.F90 b/src/ice_maEVP.F90 index 0dbdc8543..c33ab6a4e 100644 --- a/src/ice_maEVP.F90 +++ b/src/ice_maEVP.F90 @@ -1,112 +1,178 @@ module ice_maEVP_interfaces - interface - subroutine ssh2rhs(mesh) - use mod_mesh - type(t_mesh), intent(in), target :: mesh - end subroutine - - subroutine stress_tensor_a(mesh) - use mod_mesh - type(t_mesh), intent(in), target :: mesh - end subroutine - - subroutine stress2rhs_m(mesh) - use mod_mesh - type(t_mesh), intent(in), target :: mesh - end subroutine - - subroutine find_alpha_field_a(mesh) - use mod_mesh - type(t_mesh), intent(in), target :: mesh - end subroutine - - subroutine find_beta_field_a(mesh) - use mod_mesh - type(t_mesh), intent(in), target :: mesh - end subroutine - end interface + interface + subroutine ssh2rhs(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine stress_tensor_a(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine stress2rhs_m(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine find_alpha_field_a(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine find_beta_field_a(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE 
MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module -! ==================================================================== +module ice_maEVPdynamics_interface + interface + subroutine EVPdynamics_a(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice + end subroutine + + subroutine EVPdynamics_m(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice + end subroutine + end interface +end module +! +! +!_______________________________________________________________________________ ! New evp implementation following Bouillion et al. 2013 ! and Kimmritz et al. 2015 (mEVP) and Kimmritz et al. 2016 (aEVP) -! ==================================================================== -subroutine stress_tensor_m(mesh) - ! Internal stress tensor - ! New implementation following Boullion et al, Ocean Modelling 2013. - ! SD, 30.07.2014 - !=================================================================== - use o_param - use i_param - use mod_mesh - use g_config - use i_arrays - use g_parsup - +! Internal stress tensor +! New implementation following Boullion et al, Ocean Modelling 2013. +! 
SD, 30.07.2014 +subroutine stress_tensor_m(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use mod_mesh + use g_config #if defined (__icepack) -use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif - - implicit none - - integer :: elem, elnodes(3) - real(kind=WP) :: dx(3), dy(3), msum, asum - real(kind=WP) :: eps1, eps2, pressure, delta - real(kind=WP) :: val3, meancos, usum, vsum, vale - real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) - det2=1.0_WP/(1.0_WP+alpha_evp) - det1=alpha_evp*det2 - do elem=1,myDim_elem2D - elnodes=elem2D_nodes(:,elem) - !_______________________________________________________________________ - ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle - if (ulevels(elem) > 1) cycle - - msum=sum(m_ice(elnodes))*val3 - if(msum<=0.01_WP) cycle !DS - asum=sum(a_ice(elnodes))*val3 - - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - ! METRICS: - vsum=sum(v_ice_aux(elnodes)) - usum=sum(u_ice_aux(elnodes)) - meancos=metric_factor(elem) - ! - ! ====== Deformation rate tensor on element elem: - eps11(elem)=sum(dx*u_ice_aux(elnodes)) - eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics - eps22(elem)=sum(dy*v_ice_aux(elnodes)) - eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) - eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics - - ! ======= Switch to eps1,eps2 - eps1=eps11(elem)+eps22(elem) - eps2=eps11(elem)-eps22(elem) - - ! 
====== moduli: - delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) - delta=sqrt(delta) + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: elem, elnodes(3) + real(kind=WP) :: dx(3), dy(3), msum, asum + real(kind=WP) :: eps1, eps2, pressure, delta + real(kind=WP) :: val3, meancos, usum, vsum, vale + real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice + real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) + !___________________________________________________________________________ + val3=1.0_WP/3.0_WP + vale=1.0_WP/(ice%ellipse**2) + det2=1.0_WP/(1.0_WP+ice%alpha_evp) + det1=ice%alpha_evp*det2 + do elem=1,myDim_elem2D + elnodes=elem2D_nodes(:,elem) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle + + msum=sum(m_ice(elnodes))*val3 + if(msum<=0.01_WP) cycle !DS + asum=sum(a_ice(elnodes))*val3 + + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + ! 
METRICS: + vsum=sum(v_ice_aux(elnodes)) + usum=sum(u_ice_aux(elnodes)) + meancos=metric_factor(elem) + ! + ! ====== Deformation rate tensor on element elem: + eps11(elem)=sum(dx*u_ice_aux(elnodes)) + eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics + eps22(elem)=sum(dy*v_ice_aux(elnodes)) + eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) + eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics + + ! ======= Switch to eps1,eps2 + eps1=eps11(elem)+eps22(elem) + eps2=eps11(elem)-eps22(elem) + + ! ====== moduli: + delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) + delta=sqrt(delta) + #if defined (__icepack) - pressure = sum(strength(elnodes))*val3/max(delta,delta_min) + pressure = sum(strength(elnodes))*val3/max(delta,ice%delta_min) #else - pressure=pstar*msum*exp(-c_pressure*(1.0_WP-asum))/max(delta,delta_min) + pressure=ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum))/max(delta,ice%delta_min) #endif - - r1=pressure*(eps1-max(delta,delta_min)) + r1=pressure*(eps1-max(delta,ice%delta_min)) r2=pressure*eps2*vale r3=pressure*eps12(elem)*vale si1=sigma11(elem)+sigma22(elem) si2=sigma11(elem)-sigma22(elem) - + si1=det1*si1+det2*r1 si2=det1*si2+det2*r2 sigma12(elem)=det1*sigma12(elem)+det2*r3 @@ -117,628 +183,755 @@ subroutine stress_tensor_m(mesh) rdg_conv_elem(elem) = -min((eps11(elem)+eps22(elem)),0.0_WP) rdg_shear_elem(elem) = 0.5_WP*(delta - abs(eps11(elem)+eps22(elem))) #endif - - end do - ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of - ! Boullion et al Ocean Modelling 2013, but in an implicit mode: - ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), - ! and similarly for si2 and sigma12 + end do + ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of + ! Boullion et al Ocean Modelling 2013, but in an implicit mode: + ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), + ! and similarly for si2 and sigma12 end subroutine stress_tensor_m ! -! 
================================================================== -! -subroutine ssh2rhs(mesh) - ! Compute the contribution from the elevation to the rhs - ! S.D. 30.07.2014 - use o_param - use i_param - use mod_mesh - use g_config - use i_arrays - use g_parsup - use i_therm_param - implicit none - - integer :: row, elem, elnodes(3), n - real(kind=WP) :: dx(3), dy(3), vol - real(kind=WP) :: val3, meancos, aa, bb, p_ice(3) - type(t_mesh), intent(in) , target :: mesh +! +!_______________________________________________________________________________ +! Compute the contribution from the elevation to the rhs +! S.D. 30.07.2014 +subroutine ssh2rhs(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + use o_param + use mod_mesh + use g_config + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: row, elem, elnodes(3), n + real(kind=WP) :: dx(3), dy(3), vol + real(kind=WP) :: val3, meancos, aa, bb, p_ice(3) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: m_ice, m_snow + real(kind=WP), dimension(:), pointer :: rhs_a, rhs_m + real(kind=WP), dimension(:), pointer :: elevation + real(kind=WP) , pointer :: rhoice, rhosno, inv_rhowat +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + elevation => ice%srfoce_ssh + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + inv_rhowat => ice%thermo%inv_rhowat + + !___________________________________________________________________________ + val3=1.0_WP/3.0_WP + + ! 
use rhs_m and rhs_a for storing the contribution from elevation: + do row=1, myDim_nod2d + rhs_a(row)=0.0_WP + rhs_m(row)=0.0_WP + end do -#include "associate_mesh.h" + !_____________________________________________________________________________ + ! use floating sea ice for zlevel and zstar + if (use_floatice .and. .not. trim(which_ale)=='linfs') then + do elem=1,myDim_elem2d + elnodes=elem2D_nodes(:,elem) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle + + !_______________________________________________________________________ + vol=elem_area(elem) + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + + !_______________________________________________________________________ + ! add pressure gradient from sea ice --> in case of floating sea ice + p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat + do n=1,3 + p_ice(n)=min(p_ice(n),max_ice_loading) + end do + + !_______________________________________________________________________ + bb=g*val3*vol + aa=bb*sum(dx*(elevation(elnodes)+p_ice)) + bb=bb*sum(dy*(elevation(elnodes)+p_ice)) + rhs_a(elnodes)=rhs_a(elnodes)-aa + rhs_m(elnodes)=rhs_m(elnodes)-bb + end do + else + do elem=1,myDim_elem2d + elnodes=elem2D_nodes(:,elem) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle + + vol=elem_area(elem) + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + bb=g*val3*vol + aa=bb*sum(dx*elevation(elnodes)) + bb=bb*sum(dy*elevation(elnodes)) + rhs_a(elnodes)=rhs_a(elnodes)-aa + rhs_m(elnodes)=rhs_m(elnodes)-bb + end do + end if +end subroutine ssh2rhs +! +! +!_______________________________________________________________________________ +! add internal stress to the rhs +! 
SD, 30.07.2014 +subroutine stress2rhs_m(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + use o_param + use mod_mesh + use g_config + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: k, row, elem, elnodes(3) + real(kind=WP) :: dx(3), dy(3), vol + real(kind=WP) :: val3, mf, aa, bb + real(kind=WP) :: mass, cluster_area, elevation_elem(3) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m + real(kind=WP) , pointer :: rhoice, rhosno +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + + !___________________________________________________________________________ + val3=1.0_WP/3.0_WP + + do row=1, myDim_nod2d + u_rhs_ice(row)=0.0_WP + v_rhs_ice(row)=0.0_WP + end do - val3=1.0_WP/3.0_WP - - ! use rhs_m and rhs_a for storing the contribution from elevation: - do row=1, myDim_nod2d - rhs_a(row)=0.0_WP - rhs_m(row)=0.0_WP - end do - - !_____________________________________________________________________________ - ! use floating sea ice for zlevel and zstar - if (use_floatice .and. .not. 
trim(which_ale)=='linfs') then do elem=1,myDim_elem2d elnodes=elem2D_nodes(:,elem) !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle if (ulevels(elem) > 1) cycle + + if(sum(a_ice(elnodes)) < 0.01_WP) cycle !DS - !_______________________________________________________________________ vol=elem_area(elem) dx=gradient_sca(1:3,elem) dy=gradient_sca(4:6,elem) - - !_______________________________________________________________________ - ! add pressure gradient from sea ice --> in case of floating sea ice - p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat - do n=1,3 - p_ice(n)=min(p_ice(n),max_ice_loading) + mf=metric_factor(elem) !metrics + + do k=1,3 + row=elnodes(k) + u_rhs_ice(row)=u_rhs_ice(row) - vol* & + (sigma11(elem)*dx(k)+sigma12(elem)*dy(k)) & + -vol*sigma12(elem)*val3*mf !metrics + v_rhs_ice(row)=v_rhs_ice(row) - vol* & + (sigma12(elem)*dx(k)+sigma22(elem)*dy(k)) & + +vol*sigma11(elem)*val3*mf ! metrics end do - - !_______________________________________________________________________ - bb=g*val3*vol - aa=bb*sum(dx*(elevation(elnodes)+p_ice)) - bb=bb*sum(dy*(elevation(elnodes)+p_ice)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb end do - else - do elem=1,myDim_elem2d - elnodes=elem2D_nodes(:,elem) + + do row=1, myDim_nod2d !_______________________________________________________________________ - ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle - if (ulevels(elem) > 1) cycle + ! 
if cavity node skip it + if ( ulevels_nod2d(row)>1 ) cycle - vol=elem_area(elem) - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - bb=g*val3*vol - aa=bb*sum(dx*elevation(elnodes)) - bb=bb*sum(dy*elevation(elnodes)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb + mass=(m_ice(row)*rhoice+m_snow(row)*rhosno) + mass=mass/(1.0_WP+mass*mass) + u_rhs_ice(row)=(u_rhs_ice(row)*mass + rhs_a(row))/area(1,row) + v_rhs_ice(row)=(v_rhs_ice(row)*mass + rhs_m(row))/area(1,row) end do - end if -end subroutine ssh2rhs -! -!=================================================================== -! -subroutine stress2rhs_m(mesh) - - ! add internal stress to the rhs - ! SD, 30.07.2014 - !----------------------------------------------------------------- - use o_param - use i_param - use i_therm_param - use mod_mesh - use g_config - use i_arrays - use g_parsup - implicit none - - integer :: k, row, elem, elnodes(3) - real(kind=WP) :: dx(3), dy(3), vol - real(kind=WP) :: val3, mf, aa, bb - real(kind=WP) :: mass, cluster_area, elevation_elem(3) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - val3=1.0_WP/3.0_WP - - do row=1, myDim_nod2d - u_rhs_ice(row)=0.0_WP - v_rhs_ice(row)=0.0_WP - end do - - do elem=1,myDim_elem2d - elnodes=elem2D_nodes(:,elem) - !_______________________________________________________________________ - ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle - if (ulevels(elem) > 1) cycle - - if(sum(a_ice(elnodes)) < 0.01_WP) cycle !DS - - vol=elem_area(elem) - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - mf=metric_factor(elem) !metrics - - do k=1,3 - row=elnodes(k) - u_rhs_ice(row)=u_rhs_ice(row) - vol* & - (sigma11(elem)*dx(k)+sigma12(elem)*dy(k)) & - -vol*sigma12(elem)*val3*mf !metrics - v_rhs_ice(row)=v_rhs_ice(row) - vol* & - (sigma12(elem)*dx(k)+sigma22(elem)*dy(k)) & - +vol*sigma11(elem)*val3*mf ! 
metrics - end do - end do - - do row=1, myDim_nod2d - !_________________________________________________________________________ - ! if cavity node skip it - if ( ulevels_nod2d(row)>1 ) cycle - - mass=(m_ice(row)*rhoice+m_snow(row)*rhosno) - mass=mass/(1.0_WP+mass*mass) - u_rhs_ice(row)=(u_rhs_ice(row)*mass + rhs_a(row))/area(1,row) - v_rhs_ice(row)=(v_rhs_ice(row)*mass + rhs_m(row))/area(1,row) - end do end subroutine stress2rhs_m ! -!=================================================================== ! -subroutine EVPdynamics_m(mesh) - ! assemble rhs and solve for ice velocity - ! New implementation based on Bouillion et al. Ocean Modelling 2013 - ! SD 30.07.14 - !--------------------------------------------------------- - - use o_param - use i_param - use i_therm_param - use mod_mesh - use g_config - use i_arrays - use o_arrays - use g_parsup - use g_comm_auto - +!_______________________________________________________________________________ +! assemble rhs and solve for ice velocity +! New implementation based on Bouillion et al. Ocean Modelling 2013 +! 
SD 30.07.14 +subroutine EVPdynamics_m(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use g_config + use o_arrays + use g_comm_auto #if defined (__icepack) - use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength - use icedrv_main, only: icepack_to_fesom + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength + use icedrv_main, only: icepack_to_fesom #endif - - implicit none - integer :: steps, shortstep, i, ed,n - real(kind=WP) :: rdt, drag, det - real(kind=WP) :: inv_thickness(myDim_nod2D), umod, rhsu, rhsv - logical :: ice_el(myDim_elem2D), ice_nod(myDim_nod2D) - -!NR for stress_tensor_m - integer :: el, elnodes(3) - real(kind=WP) :: dx(3), dy(3), msum, asum - real(kind=WP) :: eps1, eps2, pressure, pressure_fac(myDim_elem2D), delta - real(kind=WP) :: val3, meancos, vale - real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 - -!NR for stress2rhs_m - integer :: k, row - real(kind=WP) :: vol - real(kind=WP) :: mf,aa, bb,p_ice(3) - real(kind=WP) :: mass(myDim_nod2D) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) - det2=1.0_WP/(1.0_WP+alpha_evp) - det1=alpha_evp*det2 - rdt=ice_dt - steps=evp_rheol_steps - - u_ice_aux=u_ice ! 
Initialize solver variables - v_ice_aux=v_ice - + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: steps, shortstep, i, ed,n + real(kind=WP) :: rdt, drag, det + real(kind=WP) :: inv_thickness(partit%myDim_nod2D), umod, rhsu, rhsv + logical :: ice_el(partit%myDim_elem2D), ice_nod(partit%myDim_nod2D) + !NR for stress_tensor_m + integer :: el, elnodes(3) + real(kind=WP) :: dx(3), dy(3), msum, asum + real(kind=WP) :: eps1, eps2, pressure, pressure_fac(partit%myDim_elem2D), delta + real(kind=WP) :: val3, meancos, vale + real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 + !NR for stress2rhs_m + integer :: k, row + real(kind=WP) :: vol + real(kind=WP) :: mf,aa, bb,p_ice(3) + real(kind=WP) :: mass(partit%myDim_nod2D) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice, rhs_a, rhs_m + real(kind=WP), dimension(:), pointer :: u_w, v_w + real(kind=WP), dimension(:), pointer :: elevation + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux #if defined (__icepack) - a_ice_old(:) = a_ice(:) - m_ice_old(:) = a_ice(:) - m_snow_old(:) = m_snow(:) - - call icepack_to_fesom (nx_in=(myDim_nod2D+eDim_nod2D), & - aice_out=a_ice, & - vice_out=m_ice, & - vsno_out=m_snow) + real(kind=WP), dimension(:), pointer :: a_ice_old, m_ice_old, m_snow_old +#endif + real(kind=WP) , pointer :: rhoice, rhosno, inv_rhowat +#include "associate_part_def.h" 
+#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) + rhs_a => ice%data(1)%values_rhs(:) + rhs_m => ice%data(2)%values_rhs(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + elevation => ice%srfoce_ssh(:) + stress_atmice_x => ice%stress_atmice_x(:) + stress_atmice_y => ice%stress_atmice_y(:) + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) +#if defined (__icepack) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) +#endif + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + inv_rhowat => ice%thermo%inv_rhowat + + !___________________________________________________________________________ + val3=1.0_WP/3.0_WP + vale=1.0_WP/(ice%ellipse**2) + det2=1.0_WP/(1.0_WP+ice%alpha_evp) + det1=ice%alpha_evp*det2 + rdt=ice%ice_dt + steps=ice%evp_rheol_steps + + !___________________________________________________________________________ + u_ice_aux=u_ice ! Initialize solver variables + v_ice_aux=v_ice + +#if defined (__icepack) + a_ice_old(:) = a_ice(:) + m_ice_old(:) = a_ice(:) + m_snow_old(:) = m_snow(:) + + call icepack_to_fesom (nx_in=(myDim_nod2D+eDim_nod2D), & + aice_out=a_ice, & + vice_out=m_ice, & + vsno_out=m_snow) #endif -!NR inlined, to have all initialization in one place. -! call ssh2rhs + !NR inlined, to have all initialization in one place. + ! call ssh2rhs - ! use rhs_m and rhs_a for storing the contribution from elevation: - do row=1, myDim_nod2d - rhs_a(row)=0.0_WP - rhs_m(row)=0.0_WP - end do + ! 
use rhs_m and rhs_a for storing the contribution from elevation: + do row=1, myDim_nod2d + rhs_a(row)=0.0_WP + rhs_m(row)=0.0_WP + end do - !_____________________________________________________________________________ - ! use floating sea ice for zlevel and zstar - if (use_floatice .and. .not. trim(which_ale)=='linfs') then - do el=1,myDim_elem2d - elnodes=elem2D_nodes(:,el) - - !_______________________________________________________________________ - ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle - if (ulevels(el) > 1) cycle - - !_______________________________________________________________________ - vol=elem_area(el) - dx=gradient_sca(1:3,el) - dy=gradient_sca(4:6,el) - - !_______________________________________________________________________ - ! add pressure gradient from sea ice --> in case of floating sea ice - p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat - do n=1,3 - p_ice(n)=min(p_ice(n),max_ice_loading) + !_____________________________________________________________________________ + ! use floating sea ice for zlevel and zstar + if (use_floatice .and. .not. trim(which_ale)=='linfs') then + do el=1,myDim_elem2d + elnodes=elem2D_nodes(:,el) + + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(el) > 1) cycle + + !_______________________________________________________________________ + vol=elem_area(el) + dx=gradient_sca(1:3,el) + dy=gradient_sca(4:6,el) + + !_______________________________________________________________________ + ! 
add pressure gradient from sea ice --> in case of floating sea ice + p_ice=(rhoice*m_ice(elnodes)+rhosno*m_snow(elnodes))*inv_rhowat + do n=1,3 + p_ice(n)=min(p_ice(n),max_ice_loading) + end do + + !_______________________________________________________________________ + bb=g*val3*vol + aa=bb*sum(dx*(elevation(elnodes)+p_ice)) + bb=bb*sum(dy*(elevation(elnodes)+p_ice)) + rhs_a(elnodes)=rhs_a(elnodes)-aa + rhs_m(elnodes)=rhs_m(elnodes)-bb end do - + !_____________________________________________________________________________ + ! use levitating sea ice for linfs, zlevel and zstar + else + do el=1,myDim_elem2d + elnodes=elem2D_nodes(:,el) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(el) > 1) cycle + + vol=elem_area(el) + dx=gradient_sca(1:3,el) + dy=gradient_sca(4:6,el) + bb=g*val3*vol + aa=bb*sum(dx*elevation(elnodes)) + bb=bb*sum(dy*elevation(elnodes)) + rhs_a(elnodes)=rhs_a(elnodes)-aa + rhs_m(elnodes)=rhs_m(elnodes)-bb + end do + end if + + !___________________________________________________________________________ + ! precompute thickness (the inverse is needed) and mass (scaled by area) + do i=1,myDim_nod2D + inv_thickness(i) = 0._WP + mass(i) = 0._WP + ice_nod(i) = .false. !_______________________________________________________________________ - bb=g*val3*vol - aa=bb*sum(dx*(elevation(elnodes)+p_ice)) - bb=bb*sum(dy*(elevation(elnodes)+p_ice)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb - end do - !_____________________________________________________________________________ - ! use levitating sea ice for linfs, zlevel and zstar - else - do el=1,myDim_elem2d + ! if cavity ndoe skip it + if ( ulevels_nod2d(i)>1 ) cycle + + if (a_ice(i) >= 0.01_WP) then + inv_thickness(i) = (rhoice*m_ice(i)+rhosno*m_snow(i))/a_ice(i) + inv_thickness(i) = 1.0_WP/max(inv_thickness(i), 9.0_WP) ! 
Limit the mass + + mass(i) = (m_ice(i)*rhoice+m_snow(i)*rhosno) + mass(i) = mass(i)/((1.0_WP+mass(i)*mass(i))*area(1,i)) + + ! scale rhs_a, rhs_m, too. + rhs_a(i) = rhs_a(i)/area(1,i) + rhs_m(i) = rhs_m(i)/area(1,i) + + ice_nod(i) = .true. + endif + enddo + + !___________________________________________________________________________ + ! precompute pressure factor + do el=1,myDim_elem2D elnodes=elem2D_nodes(:,el) + pressure_fac(el) = 0._WP + ice_el(el) = .false. + !_______________________________________________________________________ ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle - if (ulevels(el) > 1) cycle + if (ulevels(el) > 1) cycle - vol=elem_area(el) - dx=gradient_sca(1:3,el) - dy=gradient_sca(4:6,el) - bb=g*val3*vol - aa=bb*sum(dx*elevation(elnodes)) - bb=bb*sum(dy*elevation(elnodes)) - rhs_a(elnodes)=rhs_a(elnodes)-aa - rhs_m(elnodes)=rhs_m(elnodes)-bb + msum=sum(m_ice(elnodes))*val3 + if(msum > 0.01) then + ice_el(el) = .true. + asum=sum(a_ice(elnodes))*val3 + pressure_fac(el) = det2*ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum)) + endif end do - end if - -! precompute thickness (the inverse is needed) and mass (scaled by area) - do i=1,myDim_nod2D - inv_thickness(i) = 0._WP - mass(i) = 0._WP - ice_nod(i) = .false. - !_________________________________________________________________________ - ! if cavity ndoe skip it - if ( ulevels_nod2d(i)>1 ) cycle - - if (a_ice(i) >= 0.01_WP) then - inv_thickness(i) = (rhoice*m_ice(i)+rhosno*m_snow(i))/a_ice(i) - inv_thickness(i) = 1.0_WP/max(inv_thickness(i), 9.0_WP) ! Limit the mass - - mass(i) = (m_ice(i)*rhoice+m_snow(i)*rhosno) - mass(i) = mass(i)/((1.0_WP+mass(i)*mass(i))*area(1,i)) - - ! scale rhs_a, rhs_m, too. - rhs_a(i) = rhs_a(i)/area(1,i) - rhs_m(i) = rhs_m(i)/area(1,i) - ice_nod(i) = .true. - endif - enddo - -! precompute pressure factor - do el=1,myDim_elem2D - elnodes=elem2D_nodes(:,el) - - pressure_fac(el) = 0._WP - ice_el(el) = .false. 
- - !_______________________________________________________________________ - ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle - if (ulevels(el) > 1) cycle - - msum=sum(m_ice(elnodes))*val3 - if(msum > 0.01) then - ice_el(el) = .true. - asum=sum(a_ice(elnodes))*val3 - pressure_fac(el) = det2*pstar*msum*exp(-c_pressure*(1.0_WP-asum)) - endif - end do - - do row=1, myDim_nod2d - u_rhs_ice(row)=0.0_WP - v_rhs_ice(row)=0.0_WP - end do - -!======================================= -! Ice EVPdynamics Iteration main loop: -!======================================= + do row=1, myDim_nod2d + u_rhs_ice(row)=0.0_WP + v_rhs_ice(row)=0.0_WP + end do + !___________________________________________________________________________ + ! Ice EVPdynamics Iteration main loop: #if defined (__icepack) - rdg_conv_elem(:) = 0.0_WP - rdg_shear_elem(:) = 0.0_WP + rdg_conv_elem(:) = 0.0_WP + rdg_shear_elem(:) = 0.0_WP #endif - - do shortstep=1, steps - -!NR inlining, to make it easier to have local arrays and fuse loops -!NR call stress_tensor_m - ! Internal stress tensor - ! New implementation following Boullion et al, Ocean Modelling 2013. - ! SD, 30.07.2014 - !=================================================================== - - do el=1,myDim_elem2D - !__________________________________________________________________________ - if (ulevels(el)>1) cycle - - !__________________________________________________________________________ - if(ice_el(el)) then - - elnodes=elem2D_nodes(:,el) - dx=gradient_sca(1:3,el) - dy=gradient_sca(4:6,el) - ! METRICS: - meancos = val3*metric_factor(el) - ! - ! ====== Deformation rate tensor on element elem: - eps11(el) = sum(dx(:)*u_ice_aux(elnodes)) - sum(v_ice_aux(elnodes))*meancos !metrics - eps22(el) = sum(dy(:)*v_ice_aux(elnodes)) - eps12(el) = 0.5_WP*(sum(dy(:)*u_ice_aux(elnodes) + dx(:)*v_ice_aux(elnodes)) & - +sum(u_ice_aux(elnodes))*meancos ) !metrics - - ! 
======= Switch to eps1,eps2 - eps1 = eps11(el) + eps22(el) - eps2 = eps11(el) - eps22(el) - - ! ====== moduli: - delta = sqrt(eps1**2+vale*(eps2**2+4.0_WP*eps12(el)**2)) - - pressure = pressure_fac(el)/(delta+delta_min) - -! si1 = det1*(sigma11(el)+sigma22(el)) + pressure*(eps1-delta) -! si2 = det1*(sigma11(el)-sigma22(el)) + pressure*eps2*vale -! sigma11(el) = 0.5_WP*(si1+si2) -! sigma22(el) = 0.5_WP*(si1-si2) -!NR directly insert si1, si2 cancels some operations and should increase accuracy - sigma12(el) = det1*sigma12(el) + pressure*eps12(el)*vale - sigma11(el) = det1*sigma11(el) + 0.5_WP*pressure*(eps1 - delta + eps2*vale) - sigma22(el) = det1*sigma22(el) + 0.5_WP*pressure*(eps1 - delta - eps2*vale) + do shortstep=1, steps + !NR inlining, to make it easier to have local arrays and fuse loops + !NR call stress_tensor_m + ! Internal stress tensor + ! New implementation following Boullion et al, Ocean Modelling 2013. + ! SD, 30.07.2014 + !_______________________________________________________________________ + do el=1,myDim_elem2D + if (ulevels(el)>1) cycle + + !___________________________________________________________________ + if(ice_el(el)) then + + elnodes=elem2D_nodes(:,el) + dx=gradient_sca(1:3,el) + dy=gradient_sca(4:6,el) + ! METRICS: + meancos = val3*metric_factor(el) + ! + ! ====== Deformation rate tensor on element elem: + eps11(el) = sum(dx(:)*u_ice_aux(elnodes)) - sum(v_ice_aux(elnodes))*meancos !metrics + eps22(el) = sum(dy(:)*v_ice_aux(elnodes)) + eps12(el) = 0.5_WP*(sum(dy(:)*u_ice_aux(elnodes) + dx(:)*v_ice_aux(elnodes)) & + +sum(u_ice_aux(elnodes))*meancos ) !metrics + + ! ======= Switch to eps1,eps2 + eps1 = eps11(el) + eps22(el) + eps2 = eps11(el) - eps22(el) + + ! ====== moduli: + delta = sqrt(eps1**2+vale*(eps2**2+4.0_WP*eps12(el)**2)) + + pressure = pressure_fac(el)/(delta+ice%delta_min) + + ! si1 = det1*(sigma11(el)+sigma22(el)) + pressure*(eps1-delta) + ! si2 = det1*(sigma11(el)-sigma22(el)) + pressure*eps2*vale + ! 
sigma11(el) = 0.5_WP*(si1+si2) + ! sigma22(el) = 0.5_WP*(si1-si2) + !NR directly insert si1, si2 cancels some operations and should increase accuracy + sigma12(el) = det1*sigma12(el) + pressure*eps12(el)*vale + sigma11(el) = det1*sigma11(el) + 0.5_WP*pressure*(eps1 - delta + eps2*vale) + sigma22(el) = det1*sigma22(el) + 0.5_WP*pressure*(eps1 - delta - eps2*vale) #if defined (__icepack) - rdg_conv_elem(el) = -min((eps11(el)+eps22(el)),0.0_WP) - rdg_shear_elem(el) = 0.5_WP*(delta - abs(eps11(el)+eps22(el))) + rdg_conv_elem(el) = -min((eps11(el)+eps22(el)),0.0_WP) + rdg_shear_elem(el) = 0.5_WP*(delta - abs(eps11(el)+eps22(el))) #endif - ! end do ! fuse loops - ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of - ! Boullion et al Ocean Modelling 2013, but in an implicit mode: - ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), - ! and similarly for si2 and sigma12 + ! end do ! fuse loops + ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of + ! Boullion et al Ocean Modelling 2013, but in an implicit mode: + ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), + ! and similarly for si2 and sigma12 + + !NR inlining call stress2rhs_m + ! add internal stress to the rhs + ! SD, 30.07.2014 + !----------------------------------------------------------------- + if (elnodes(1) <= myDim_nod2D) then + u_rhs_ice(elnodes(1)) = u_rhs_ice(elnodes(1)) - elem_area(el)* & + (sigma11(el)*dx(1)+sigma12(el)*(dy(1) + meancos)) !metrics + v_rhs_ice(elnodes(1)) = v_rhs_ice(elnodes(1)) - elem_area(el)* & + (sigma12(el)*dx(1)+sigma22(el)*dy(1) - sigma11(el)*meancos) ! metrics + end if + + if (elnodes(2) <= myDim_nod2D) then + u_rhs_ice(elnodes(2)) = u_rhs_ice(elnodes(2)) - elem_area(el)* & + (sigma11(el)*dx(2)+sigma12(el)*(dy(2) + meancos)) !metrics + v_rhs_ice(elnodes(2)) = v_rhs_ice(elnodes(2)) - elem_area(el)* & + (sigma12(el)*dx(2)+sigma22(el)*dy(2) - sigma11(el)*meancos) ! 
metrics + end if + + if (elnodes(3) <= myDim_nod2D) then + u_rhs_ice(elnodes(3)) = u_rhs_ice(elnodes(3)) - elem_area(el)* & + (sigma11(el)*dx(3)+sigma12(el)*(dy(3) + meancos)) !metrics + v_rhs_ice(elnodes(3)) = v_rhs_ice(elnodes(3)) - elem_area(el)* & + (sigma12(el)*dx(3)+sigma22(el)*dy(3) - sigma11(el)*meancos) ! metrics + end if + end if + end do ! --> do el=1,myDim_elem2D + + do i=1, myDim_nod2d + !___________________________________________________________________ + if (ulevels_nod2D(i)>1) cycle + + !___________________________________________________________________ + if (ice_nod(i)) then ! Skip if ice is absent + u_rhs_ice(i) = u_rhs_ice(i)*mass(i) + rhs_a(i) + v_rhs_ice(i) = v_rhs_ice(i)*mass(i) + rhs_m(i) + ! end do !NR fuse loops + !============= stress2rhs_m ends ====================== + ! do i=1,myDim_nod2D + umod = sqrt((u_ice_aux(i)-u_w(i))**2+(v_ice_aux(i)-v_w(i))**2) + drag = rdt*ice%cd_oce_ice*umod*density_0*inv_thickness(i) + + !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) + rhsu = u_ice(i)+drag*u_w(i)+rdt*(inv_thickness(i)*stress_atmice_x(i)+u_rhs_ice(i)) + ice%beta_evp*u_ice_aux(i) + rhsv = v_ice(i)+drag*v_w(i)+rdt*(inv_thickness(i)*stress_atmice_y(i)+v_rhs_ice(i)) + ice%beta_evp*v_ice_aux(i) + + !solve (Coriolis and water stress are treated implicitly) + det = bc_index_nod2D(i) / ((1.0_WP+ice%beta_evp+drag)**2 + (rdt*mesh%coriolis_node(i))**2) + + u_ice_aux(i) = det*((1.0_WP+ice%beta_evp+drag)*rhsu +rdt*mesh%coriolis_node(i)*rhsv) + v_ice_aux(i) = det*((1.0_WP+ice%beta_evp+drag)*rhsv -rdt*mesh%coriolis_node(i)*rhsu) + end if + end do ! --> do i=1, myDim_nod2d - !NR inlining call stress2rhs_m - ! add internal stress to the rhs - ! 
SD, 30.07.2014 - !----------------------------------------------------------------- - if (elnodes(1) <= myDim_nod2D) then - u_rhs_ice(elnodes(1)) = u_rhs_ice(elnodes(1)) - elem_area(el)* & - (sigma11(el)*dx(1)+sigma12(el)*(dy(1) + meancos)) !metrics - v_rhs_ice(elnodes(1)) = v_rhs_ice(elnodes(1)) - elem_area(el)* & - (sigma12(el)*dx(1)+sigma22(el)*dy(1) - sigma11(el)*meancos) ! metrics - end if + !_______________________________________________________________________ + ! apply sea ice velocity boundary condition + do ed=1,myDim_edge2D + !___________________________________________________________________ + ! apply coastal sea ice velocity boundary conditions + if(myList_edge2D(ed) > edge2D_in) then + u_ice_aux(edges(:,ed))=0.0_WP + v_ice_aux(edges(:,ed))=0.0_WP + end if + + !___________________________________________________________________ + ! apply sea ice velocity boundary conditions at cavity-ocean edge + if (use_cavity) then + if ( (ulevels(edge_tri(1,ed))>1) .or. & + ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then + u_ice_aux(edges(1:2,ed))=0.0_WP + v_ice_aux(edges(1:2,ed))=0.0_WP + end if + end if + end do ! --> do ed=1,myDim_edge2D + + !_______________________________________________________________________ + call exchange_nod_begin(u_ice_aux, v_ice_aux, partit) - if (elnodes(2) <= myDim_nod2D) then - u_rhs_ice(elnodes(2)) = u_rhs_ice(elnodes(2)) - elem_area(el)* & - (sigma11(el)*dx(2)+sigma12(el)*(dy(2) + meancos)) !metrics - v_rhs_ice(elnodes(2)) = v_rhs_ice(elnodes(2)) - elem_area(el)* & - (sigma12(el)*dx(2)+sigma22(el)*dy(2) - sigma11(el)*meancos) ! metrics - end if + do row=1, myDim_nod2d + u_rhs_ice(row)=0.0_WP + v_rhs_ice(row)=0.0_WP + end do - if (elnodes(3) <= myDim_nod2D) then - u_rhs_ice(elnodes(3)) = u_rhs_ice(elnodes(3)) - elem_area(el)* & - (sigma11(el)*dx(3)+sigma12(el)*(dy(3) + meancos)) !metrics - v_rhs_ice(elnodes(3)) = v_rhs_ice(elnodes(3)) - elem_area(el)* & - (sigma12(el)*dx(3)+sigma22(el)*dy(3) - sigma11(el)*meancos) ! 
metrics - end if - end if - end do + call exchange_nod_end(partit) + + end do ! --> do shortstep=1, steps + u_ice=u_ice_aux + v_ice=v_ice_aux - do i=1, myDim_nod2d - !__________________________________________________________________________ - if (ulevels_nod2D(i)>1) cycle - - !__________________________________________________________________________ - if (ice_nod(i)) then ! Skip if ice is absent - - u_rhs_ice(i) = u_rhs_ice(i)*mass(i) + rhs_a(i) - v_rhs_ice(i) = v_rhs_ice(i)*mass(i) + rhs_m(i) - - ! end do !NR fuse loops - !============= stress2rhs_m ends ====================== - - ! do i=1,myDim_nod2D - - umod = sqrt((u_ice_aux(i)-u_w(i))**2+(v_ice_aux(i)-v_w(i))**2) - drag = rdt*Cd_oce_ice*umod*density_0*inv_thickness(i) - - !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) - rhsu = u_ice(i)+drag*u_w(i)+rdt*(inv_thickness(i)*stress_atmice_x(i)+u_rhs_ice(i)) + beta_evp*u_ice_aux(i) - rhsv = v_ice(i)+drag*v_w(i)+rdt*(inv_thickness(i)*stress_atmice_y(i)+v_rhs_ice(i)) + beta_evp*v_ice_aux(i) - - !solve (Coriolis and water stress are treated implicitly) - det = bc_index_nod2D(i) / ((1.0_WP+beta_evp+drag)**2 + (rdt*coriolis_node(i))**2) - - u_ice_aux(i) = det*((1.0_WP+beta_evp+drag)*rhsu +rdt*coriolis_node(i)*rhsv) - v_ice_aux(i) = det*((1.0_WP+beta_evp+drag)*rhsv -rdt*coriolis_node(i)*rhsu) - end if - end do - - call exchange_nod_begin(u_ice_aux, v_ice_aux) - - do row=1, myDim_nod2d - u_rhs_ice(row)=0.0_WP - v_rhs_ice(row)=0.0_WP - end do - - call exchange_nod_end - end do - - u_ice=u_ice_aux - v_ice=v_ice_aux end subroutine EVPdynamics_m ! ! -! -! ==================================================================== +!_______________________________________________________________________________ ! aEVP implementation: Similar to mEVP, but alpha is variable. ! The subroutines involved are with _a. -! ==================================================================== -! -subroutine find_alpha_field_a(mesh) - ! 
EVP stability parameter alpha is computed at each element - ! aEVP implementation - ! SD, 13.02.2017 - ! ================================================================== - use o_param - use i_param - use i_therm_param - use mod_mesh - use g_config - use i_arrays - use g_parsup - +! EVP stability parameter alpha is computed at each element +! aEVP implementation +! SD, 13.02.2017 +subroutine find_alpha_field_a(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use g_config #if defined (__icepack) - use icedrv_main, only: strength + use icedrv_main, only: strength #endif - - implicit none - - integer :: elem, elnodes(3) - real(kind=WP) :: dx(3), dy(3), msum, asum - real(kind=WP) :: eps1, eps2, pressure, delta - real(kind=WP) :: val3, meancos, usum, vsum, vale - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) - do elem=1,myDim_elem2D - elnodes=elem2D_nodes(:,elem) - !_______________________________________________________________________ - ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle - if (ulevels(elem) > 1) cycle + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: elem, elnodes(3) + real(kind=WP) :: dx(3), dy(3), msum, asum + real(kind=WP) :: eps1, eps2, pressure, delta + real(kind=WP) :: val3, meancos, usum, vsum, vale + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice + real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux + real(kind=WP), dimension(:), pointer :: alpha_evp_array + real(kind=WP) , pointer :: rhoice +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) + alpha_evp_array => ice%alpha_evp_array(:) + rhoice => ice%thermo%rhoice + + !___________________________________________________________________________ + val3=1.0_WP/3.0_WP + vale=1.0_WP/(ice%ellipse**2) + do elem=1,myDim_elem2D + elnodes=elem2D_nodes(:,elem) + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle + + msum=sum(m_ice(elnodes))*val3 + if(msum<=0.01_WP) cycle !DS + asum=sum(a_ice(elnodes))*val3 + + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + ! METRICS: + vsum=sum(v_ice_aux(elnodes)) + usum=sum(u_ice_aux(elnodes)) + meancos=metric_factor(elem) + ! + ! ====== Deformation rate tensor on element elem: + eps11(elem)=sum(dx*u_ice_aux(elnodes)) + eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics + eps22(elem)=sum(dy*v_ice_aux(elnodes)) + eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) + eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics - msum=sum(m_ice(elnodes))*val3 - if(msum<=0.01_WP) cycle !DS - asum=sum(a_ice(elnodes))*val3 - - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - ! 
METRICS: - vsum=sum(v_ice_aux(elnodes)) - usum=sum(u_ice_aux(elnodes)) - meancos=metric_factor(elem) - ! - ! ====== Deformation rate tensor on element elem: - eps11(elem)=sum(dx*u_ice_aux(elnodes)) - eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics - eps22(elem)=sum(dy*v_ice_aux(elnodes)) - eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) - eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics - - ! ======= Switch to eps1,eps2 - eps1=eps11(elem)+eps22(elem) - eps2=eps11(elem)-eps22(elem) - - ! ====== moduli: - delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) - delta=sqrt(delta) + ! ======= Switch to eps1,eps2 + eps1=eps11(elem)+eps22(elem) + eps2=eps11(elem)-eps22(elem) + + ! ====== moduli: + delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) + delta=sqrt(delta) #if defined (__icepack) - pressure = sum(strength(elnodes))*val3/(delta+delta_min)/msum + pressure = sum(strength(elnodes))*val3/(delta+ice%delta_min)/msum #else - pressure = pstar*exp(-c_pressure*(1.0_WP-asum))/(delta+delta_min) ! no multiplication + pressure = ice%pstar*exp(-ice%c_pressure*(1.0_WP-asum))/(delta+ice%delta_min) ! no multiplication ! with thickness (msum) #endif - !adjust c_aevp such, that alpha_evp_array and beta_evp_array become in acceptable range - alpha_evp_array(elem)=max(50.0_WP,sqrt(ice_dt*c_aevp*pressure/rhoice/elem_area(elem))) - ! /voltriangle(elem) for FESOM1.4 - ! We do not allow alpha to be too small! - end do - end subroutine find_alpha_field_a -! ==================================================================== - -subroutine stress_tensor_a(mesh) - ! Internal stress tensor - ! New implementation following Boullion et al, Ocean Modelling 2013. - ! and Kimmritz et al., Ocean Modelling 2016 - ! 
SD, 14.02.2017 - !=================================================================== - use o_param - use i_param - use mod_mesh - use g_config - use i_arrays - use g_parsup - + !adjust c_aevp such, that alpha_evp_array and beta_evp_array become in acceptable range + alpha_evp_array(elem)=max(50.0_WP,sqrt(ice%ice_dt*ice%c_aevp*pressure/rhoice/elem_area(elem))) + ! /voltriangle(elem) for FESOM1.4 + ! We do not allow alpha to be too small! + end do !--> do elem=1,myDim_elem2D +end subroutine find_alpha_field_a +! +! +!_______________________________________________________________________________ +! Internal stress tensor +! New implementation following Boullion et al, Ocean Modelling 2013. +! and Kimmritz et al., Ocean Modelling 2016 +! SD, 14.02.2017 +subroutine stress_tensor_a(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + use o_param + use mod_mesh + use g_config #if defined (__icepack) - use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem, strength #endif - - implicit none - - integer :: elem, elnodes(3) - real(kind=WP) :: dx(3), dy(3), msum, asum - real(kind=WP) :: eps1, eps2, pressure, delta - real(kind=WP) :: val3, meancos, usum, vsum, vale - real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - val3=1.0_WP/3.0_WP - vale=1.0_WP/(ellipse**2) - do elem=1,myDim_elem2D - !__________________________________________________________________________ - ! if element has any cavity node skip it - !!PS if ( any(ulevels_nod2d(elnodes)>1) ) cycle - if (ulevels(elem) > 1) cycle - - !__________________________________________________________________________ - det2=1.0_WP/(1.0_WP+alpha_evp_array(elem)) ! 
Take alpha from array - det1=alpha_evp_array(elem)*det2 - - elnodes=elem2D_nodes(:,elem) - - msum=sum(m_ice(elnodes))*val3 - if(msum<=0.01_WP) cycle !DS - asum=sum(a_ice(elnodes))*val3 - - dx=gradient_sca(1:3,elem) - dy=gradient_sca(4:6,elem) - ! METRICS: - vsum=sum(v_ice_aux(elnodes)) - usum=sum(u_ice_aux(elnodes)) - meancos=metric_factor(elem) - ! - ! ====== Deformation rate tensor on element elem: - eps11(elem)=sum(dx*u_ice_aux(elnodes)) - eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics - eps22(elem)=sum(dy*v_ice_aux(elnodes)) - eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) - eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics - - ! ======= Switch to eps1,eps2 - eps1=eps11(elem)+eps22(elem) - eps2=eps11(elem)-eps22(elem) - - ! ====== moduli: - delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) - delta=sqrt(delta) + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: elem, elnodes(3) + real(kind=WP) :: dx(3), dy(3), msum, asum + real(kind=WP) :: eps1, eps2, pressure, delta + real(kind=WP) :: val3, meancos, usum, vsum, vale + real(kind=WP) :: det1, det2, r1, r2, r3, si1, si2 + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice + real(kind=WP), dimension(:), pointer :: eps11, eps12, eps22 + real(kind=WP), dimension(:), pointer :: sigma11, sigma12, sigma22 + real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux + real(kind=WP), dimension(:), pointer :: alpha_evp_array +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + eps11 => ice%work%eps11(:) + eps12 => ice%work%eps12(:) + eps22 => ice%work%eps22(:) + sigma11 => ice%work%sigma11(:) + sigma12 => ice%work%sigma12(:) + sigma22 => ice%work%sigma22(:) + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) + alpha_evp_array => ice%alpha_evp_array(:) + + !___________________________________________________________________________ + val3=1.0_WP/3.0_WP + vale=1.0_WP/(ice%ellipse**2) + do elem=1,myDim_elem2D + !_______________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels(elem) > 1) cycle + + !_______________________________________________________________________ + det2=1.0_WP/(1.0_WP+alpha_evp_array(elem)) ! Take alpha from array + det1=alpha_evp_array(elem)*det2 + + elnodes=elem2D_nodes(:,elem) + + msum=sum(m_ice(elnodes))*val3 + if(msum<=0.01_WP) cycle !DS + asum=sum(a_ice(elnodes))*val3 + + dx=gradient_sca(1:3,elem) + dy=gradient_sca(4:6,elem) + ! METRICS: + vsum=sum(v_ice_aux(elnodes)) + usum=sum(u_ice_aux(elnodes)) + meancos=metric_factor(elem) + ! + ! ====== Deformation rate tensor on element elem: + eps11(elem)=sum(dx*u_ice_aux(elnodes)) + eps11(elem)=eps11(elem)-val3*vsum*meancos !metrics + eps22(elem)=sum(dy*v_ice_aux(elnodes)) + eps12(elem)=0.5_WP*sum(dy*u_ice_aux(elnodes) + dx*v_ice_aux(elnodes)) + eps12(elem)=eps12(elem)+0.5_WP*val3*usum*meancos !metrics + + ! 
======= Switch to eps1,eps2 + eps1=eps11(elem)+eps22(elem) + eps2=eps11(elem)-eps22(elem) + + ! ====== moduli: + delta=eps1**2+vale*(eps2**2+4.0_WP*eps12(elem)**2) + delta=sqrt(delta) #if defined (__icepack) - pressure = sum(strength(elnodes))*val3/(delta+delta_min) + pressure = sum(strength(elnodes))*val3/(delta+ice%delta_min) #else - pressure=pstar*msum*exp(-c_pressure*(1.0_WP-asum))/(delta+delta_min) + pressure=ice%pstar*msum*exp(-ice%c_pressure*(1.0_WP-asum))/(delta+ice%delta_min) #endif r1=pressure*(eps1-delta) @@ -757,119 +950,181 @@ subroutine stress_tensor_a(mesh) rdg_conv_elem(elem) = -min((eps11(elem)+eps22(elem)),0.0_WP) rdg_shear_elem(elem) = 0.5_WP*(delta - abs(eps11(elem)+eps22(elem))) #endif - - end do - ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of - ! Boullion et al Ocean Modelling 2013, but in an implicit mode: - ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), - ! and similarly for si2 and sigma12 + end do ! --> do elem=1,myDim_elem2D + ! Equations solved in terms of si1, si2, eps1, eps2 are (43)-(45) of + ! Boullion et al Ocean Modelling 2013, but in an implicit mode: + ! si1_{p+1}=det1*si1_p+det2*r1, where det1=alpha/(1+alpha) and det2=1/(1+alpha), + ! and similarly for si2 and sigma12 end subroutine stress_tensor_a ! -!=================================================================== ! -subroutine EVPdynamics_a(mesh) - ! assemble rhs and solve for ice velocity - ! New implementation based on Bouillion et al. Ocean Modelling 2013 - ! and Kimmritz et al., Ocean Modelling 2016 - ! SD 14.02.17 - !--------------------------------------------------------- - -use o_param -use mod_mesh -use i_arrays -USE o_arrays -use i_param -use o_PARAM -use i_therm_param -use g_parsup -use g_comm_auto -use ice_maEVP_interfaces - +!_______________________________________________________________________________ +! assemble rhs and solve for ice velocity +! New implementation based on Bouillion et al. 
Ocean Modelling 2013 +! and Kimmritz et al., Ocean Modelling 2016 +! SD 14.02.17 +subroutine EVPdynamics_a(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + USE o_arrays + use o_PARAM + use g_config, only: use_cavity + use g_comm_auto + use ice_maEVP_interfaces #if defined (__icepack) - use icedrv_main, only: rdg_conv_elem, rdg_shear_elem + use icedrv_main, only: rdg_conv_elem, rdg_shear_elem #endif - - implicit none - integer :: steps, shortstep, i, ed - real(kind=WP) :: rdt, drag, det, fc - real(kind=WP) :: thickness, inv_thickness, umod, rhsu, rhsv - REAL(kind=WP) :: t0,t1, t2, t3, t4, t5, t00, txx - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - steps=evp_rheol_steps - rdt=ice_dt - u_ice_aux=u_ice ! Initialize solver variables - v_ice_aux=v_ice - call ssh2rhs(mesh) + implicit none + type(t_ice), intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: steps, shortstep, i, ed + real(kind=WP) :: rdt, drag, det, fc + real(kind=WP) :: thickness, inv_thickness, umod, rhsu, rhsv + REAL(kind=WP) :: t0,t1, t2, t3, t4, t5, t00, txx + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: u_rhs_ice, v_rhs_ice + real(kind=WP), dimension(:), pointer :: u_w, v_w + real(kind=WP), dimension(:), pointer :: stress_atmice_x, stress_atmice_y + real(kind=WP), dimension(:), pointer :: u_ice_aux, v_ice_aux + real(kind=WP), dimension(:), pointer :: beta_evp_array + real(kind=WP) , pointer :: rhoice, rhosno +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + u_rhs_ice => ice%uice_rhs(:) + v_rhs_ice => ice%vice_rhs(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + stress_atmice_x => ice%stress_atmice_x + stress_atmice_y => ice%stress_atmice_y + u_ice_aux => ice%uice_aux(:) + v_ice_aux => ice%vice_aux(:) + beta_evp_array => ice%beta_evp_array(:) + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + + !___________________________________________________________________________ + steps=ice%evp_rheol_steps + rdt=ice%ice_dt + u_ice_aux=u_ice ! Initialize solver variables + v_ice_aux=v_ice + call ssh2rhs(ice, partit, mesh) #if defined (__icepack) - rdg_conv_elem(:) = 0.0_WP - rdg_shear_elem(:) = 0.0_WP + rdg_conv_elem(:) = 0.0_WP + rdg_shear_elem(:) = 0.0_WP #endif - do shortstep=1, steps - call stress_tensor_a(mesh) - call stress2rhs_m(mesh) ! _m=_a, so no _m version is the only one! - do i=1,myDim_nod2D - - !_______________________________________________________________________ - ! if element has any cavity node skip it - if (ulevels_nod2d(i)>1) cycle + do shortstep=1, steps + call stress_tensor_a(ice, partit, mesh) + call stress2rhs_m(ice, partit, mesh) ! _m=_a, so no _m version is the only one! 
+ do i=1,myDim_nod2D + + !___________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels_nod2d(i)>1) cycle + + thickness=(rhoice*m_ice(i)+rhosno*m_snow(i))/max(a_ice(i),0.01_WP) + thickness=max(thickness, 9.0_WP) ! Limit if it is too small (0.01 m) + inv_thickness=1.0_WP/thickness + + umod=sqrt((u_ice_aux(i)-u_w(i))**2+(v_ice_aux(i)-v_w(i))**2) + drag=rdt*ice%cd_oce_ice*umod*density_0*inv_thickness + + !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) + rhsu=u_ice(i)+drag*u_w(i)+rdt*(inv_thickness*stress_atmice_x(i)+u_rhs_ice(i)) + rhsv=v_ice(i)+drag*v_w(i)+rdt*(inv_thickness*stress_atmice_y(i)+v_rhs_ice(i)) + + rhsu=beta_evp_array(i)*u_ice_aux(i)+rhsu + rhsv=beta_evp_array(i)*v_ice_aux(i)+rhsv + !solve (Coriolis and water stress are treated implicitly) + fc=rdt*mesh%coriolis_node(i) + det=(1.0_WP+beta_evp_array(i)+drag)**2+fc**2 + det=bc_index_nod2D(i)/det + u_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsu+fc*rhsv) + v_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsv-fc*rhsu) + end do + + !_______________________________________________________________________ + ! apply sea ice velocity boundary condition + do ed=1,myDim_edge2D + !___________________________________________________________________ + ! apply coastal sea ice velocity boundary conditions + if(myList_edge2D(ed) > edge2D_in) then + u_ice_aux(edges(:,ed))=0.0_WP + v_ice_aux(edges(:,ed))=0.0_WP + end if + + !___________________________________________________________________ + ! apply sea ice velocity boundary conditions at cavity-ocean edge + if (use_cavity) then + if ( (ulevels(edge_tri(1,ed))>1) .or. & + ( edge_tri(2,ed)>0 .and. ulevels(edge_tri(2,ed))>1) ) then + u_ice_aux(edges(1:2,ed))=0.0_WP + v_ice_aux(edges(1:2,ed))=0.0_WP + end if + end if + end do ! 
--> do ed=1,myDim_edge2D + + call exchange_nod(u_ice_aux, v_ice_aux, partit) + end do - thickness=(rhoice*m_ice(i)+rhosno*m_snow(i))/max(a_ice(i),0.01_WP) - thickness=max(thickness, 9.0_WP) ! Limit if it is too small (0.01 m) - inv_thickness=1.0_WP/thickness - - umod=sqrt((u_ice_aux(i)-u_w(i))**2+(v_ice_aux(i)-v_w(i))**2) - drag=rdt*Cd_oce_ice*umod*density_0*inv_thickness - - !rhs for water stress, air stress, and u_rhs_ice/v (internal stress + ssh) - rhsu=u_ice(i)+drag*u_w(i)+rdt*(inv_thickness*stress_atmice_x(i)+u_rhs_ice(i)) - rhsv=v_ice(i)+drag*v_w(i)+rdt*(inv_thickness*stress_atmice_y(i)+v_rhs_ice(i)) - - rhsu=beta_evp_array(i)*u_ice_aux(i)+rhsu - rhsv=beta_evp_array(i)*v_ice_aux(i)+rhsv - !solve (Coriolis and water stress are treated implicitly) - fc=rdt*coriolis_node(i) - det=(1.0_WP+beta_evp_array(i)+drag)**2+fc**2 - det=bc_index_nod2D(i)/det - u_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsu+fc*rhsv) - v_ice_aux(i)=det*((1.0_WP+beta_evp_array(i)+drag)*rhsv-fc*rhsu) - end do - call exchange_nod(u_ice_aux, v_ice_aux) - end do - u_ice=u_ice_aux v_ice=v_ice_aux - - call find_alpha_field_a(mesh) ! alpha_evp_array is initialized with alpha_evp; - ! At this stage we already have non-trivial velocities. - call find_beta_field_a(mesh) + + call find_alpha_field_a(ice, partit, mesh) ! alpha_evp_array is initialized with alpha_evp; + ! At this stage we already have non-trivial velocities. + call find_beta_field_a(ice, partit, mesh) end subroutine EVPdynamics_a ! -! ================================================================= ! -subroutine find_beta_field_a(mesh) +!_______________________________________________________________________________ ! beta_evp_array is defined at nodes, and this is the only ! reason we need it in addition to alpha_evp_array (we work with ! alpha=beta, and keep different names for generality; mEVP can work with ! alpha \ne beta, but not aEVP). 
- -use mod_mesh -use o_param -USE i_param -use i_arrays -use g_parsup -Implicit none -integer :: n - -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - +subroutine find_beta_field_a(ice, partit, mesh) + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE MOD_ICE + use o_param + Implicit none + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice + !___________________________________________________________________________ + integer :: n + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: alpha_evp_array, beta_evp_array +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + alpha_evp_array => ice%alpha_evp_array(:) + beta_evp_array => ice%beta_evp_array(:) + + !___________________________________________________________________________ DO n=1, myDim_nod2D + !________________________________________________________________________ + ! if element has any cavity node skip it + if (ulevels_nod2d(n)>1) cycle + ! ============== ! FESOM1.4 and stand-alone FESIM ! beta_evp_array(n) = maxval(alpha_evp_array(nod_in_elem2D(n)%addresses(1:nod_in_elem2D(n)%nmb))) diff --git a/src/ice_modules.F90 b/src/ice_modules.F90 index 4300aee6d..d49d7f786 100755 --- a/src/ice_modules.F90 +++ b/src/ice_modules.F90 @@ -1,155 +1,106 @@ -! ===================== -! Sea ice -! Finite-volume implementation -! Modules for coupled version -! Only EVP solver is available in this distrib. memory setup -! ====================== -! Ice velocity is defined at nodes -!=========================================================================== -! -MODULE i_PARAM - ! - ! Ice specific parameters - ! - USE o_PARAM - IMPLICIT NONE - SAVE - ! ice model parameters: - ! 
RHEOLOGY - REAL(kind=WP) :: Pstar = 30000.0_WP ![N/m^2] - REAL(kind=WP) :: ellipse =2.0_WP ! - REAL(kind=WP) :: c_pressure =20.0_WP ! - REAL(kind=WP) :: delta_min=1.0e-11 ! [s^(-1)] - REAL(kind=WP) :: Clim_evp=615 ! kg/m^2 - REAL(kind=WP) :: zeta_min=4.0e+8 ! kg/s - INTEGER :: evp_rheol_steps=120 ! EVP rheology - ! cybcycling steps - REAL(kind=WP) :: ice_gamma_fct=0.25_WP ! smoothing parameter - ! in ice fct advection - REAL(kind=WP) :: ice_diff=10.0_WP ! diffusion to stabilize - ! ice advection - REAL(kind=WP) :: Tevp_inv - real(kind=WP) :: theta_io=0.0_WP ! rotation angle - ! (ice-ocean), available - ! in EVP - real(kind=WP) :: alpha_evp=250, beta_evp=250 - real(kind=WP) :: c_aevp=0.15 ! 0.1--0.2, but should be adjusted experimentally - ! Ice forcing averaging - integer :: ice_ave_steps=1 !ice step=ice_ave_steps*oce_step - real(kind=WP) :: cd_oce_ice = 5.5e-3 ! drag coef. oce - ice - - logical :: ice_free_slip=.false. - integer :: whichEVP=0 !0=standart; 1=mEVP; 2=aEVP - real(kind=WP) :: ice_dt !ice step=ice_ave_steps*oce_step - -NAMELIST /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, Cd_oce_ice, & -ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, alpha_evp, beta_evp, c_aevp - -END MODULE i_PARAM -! -!============================================================================= -! -MODULE i_ARRAYS -! -! Arrays used to store ice variables and organize coupling -! -USE o_PARAM -implicit none -save - logical :: ice_update = .true. ! - integer :: ice_steps_since_upd = 0 ! 
- real(kind=WP),allocatable,dimension(:,:) :: ice_grad_vel - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice, V_ice, m_ice, a_ice - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_old, V_ice_old, m_ice_old, a_ice_old, m_snow_old,thdgr_old !PS - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_rhs_ice, V_rhs_ice, m_snow - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_m, rhs_a, rhs_ms, ths_temp - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_w, V_w - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: u_ice_aux, v_ice_aux ! of the size of u_ice, v_ice - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_mdiv, rhs_adiv, rhs_msdiv - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: elevation - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: sigma11, sigma12, sigma22 - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: eps11, eps12, eps22 - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: fresh_wa_flux - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: net_heat_flux -#if defined (__oasis) - real(kind=WP),target, allocatable, dimension(:) :: ice_alb, ice_temp ! new fields for OIFS coupling - real(kind=WP),target, allocatable, dimension(:) :: oce_heat_flux, ice_heat_flux - real(kind=WP),target, allocatable, dimension(:) :: tmp_oce_heat_flux, tmp_ice_heat_flux - !temporary flux fields - !(for flux correction) - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: rhs_temp, m_templ, dm_temp, rhs_tempdiv -#if defined (__oifs) - real(kind=WP),target, allocatable, dimension(:) :: enthalpyoffuse -#endif -#endif /* (__oasis) */ - - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: S_oc_array, T_oc_array - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_x - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_iceoce_y - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_x - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: stress_atmice_y - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: t_skin - ! 
FCT implementation - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_icel, a_icel, m_snowl - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: dm_ice, da_ice, dm_snow - REAL(kind=WP), ALLOCATABLE, DIMENSION(:,:) :: icefluxes - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: icepplus, icepminus - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: mass_matrix - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: alpha_evp_array(:) ! of myDim_elem2D - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: beta_evp_array(:) ! of myDim_node2D+eDim_node2D - -! Mean arrays - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: U_ice_mean, V_ice_mean - REAL(kind=WP), ALLOCATABLE, DIMENSION(:) :: m_ice_mean, a_ice_mean, m_snow_mean - END MODULE i_ARRAYS -!===================================================================== -module i_therm_param -USE o_PARAM - implicit none -REAL(kind=WP), parameter :: rhoair= 1.3 ! Air density, LY2004 !1.3 AOMIP -REAL(kind=WP), parameter :: inv_rhoair= 1./1.3 ! Air density, LY2004 !1.3 AOMIP -REAL(kind=WP), parameter :: rhowat= 1025. ! Water density -REAL(kind=WP), parameter :: inv_rhowat= 1./1025. ! Inverse Water density -REAL(kind=WP), parameter :: rhoice= 910. ! Ice density, AOMIP -REAL(kind=WP), parameter :: inv_rhoice= 1./910. ! Ice density, AOMIP -REAL(kind=WP), parameter :: rhosno= 290. ! Snow density, AOMIP -REAL(kind=WP), parameter :: inv_rhosno= 1./290. ! Snow density, AOMIP - -REAL(kind=WP), parameter :: cpair=1005. ! Specific heat of air [J/(kg * K)] -REAL(kind=WP), parameter :: cpice=2106. ! Specific heat of ice [J/(kg * K)] -REAL(kind=WP), parameter :: cpsno=2090. ! Specific heat of snow [J/(kg * K)] -REAL(kind=WP), parameter :: cc=rhowat*4190.0 ! Volumetr. heat cap. of water [J/m**3/K](cc = rhowat*cp_water) -REAL(kind=WP), parameter :: cl=rhoice*3.34e5 ! Volumetr. latent heat of ice fusion [J/m**3](cl=rhoice*Lf) -REAL(kind=WP), parameter :: clhw=2.501e6 ! Specific latent heat [J/kg]: water -> water vapor -REAL(kind=WP), parameter :: clhi=2.835e6 ! 
sea ice-> water vapor - -REAL(kind=WP), parameter :: tmelt=273.15 ! 0 deg C expressed in K -REAL(kind=WP), parameter :: boltzmann=5.67E-8 ! S. Boltzmann const.*longw. emissivity - -REAL(kind=WP) :: con = 2.1656 ! Thermal conductivities: ice; W/m/K -REAL(kind=WP) :: consn = 0.31 ! snow - -REAL(kind=WP) :: Sice = 4.0 ! Ice salinity 3.2--5.0 ppt. - -integer :: iclasses=7 ! Number of ice thickness gradations for ice growth calcs. -REAL(kind=WP) :: h0=1.0 ! Lead closing parameter [m] ! 0.5 - -REAL(kind=WP) :: hmin= 0.01 ! Cut-off ice thickness !! -REAL(kind=WP) :: Armin=0.01 ! Minimum ice concentration !! - -REAL(kind=WP) :: emiss_ice=0.97 ! Emissivity of Snow/Ice, -REAL(kind=WP) :: emiss_wat=0.97 ! Emissivity of open water - -REAL(kind=WP) :: albsn= 0.81 ! Albedo: frozen snow -REAL(kind=WP) :: albsnm= 0.77 ! melting snow -REAL(kind=WP) :: albi= 0.70 ! frozen ice -REAL(kind=WP) :: albim= 0.68 ! melting ice -REAL(kind=WP) :: albw= 0.066 ! open water, LY2004 - - NAMELIST /ice_therm/ Sice, h0, emiss_ice, & - emiss_wat, albsn, albsnm, albi, albim, albw, con, consn - -end module i_therm_param - +! ! ! ===================== +! ! ! Sea ice +! ! ! Finite-volume implementation +! ! ! Modules for coupled version +! ! ! Only EVP solver is available in this distrib. memory setup +! ! ! ====================== +! ! ! Ice velocity is defined at nodes +! ! !=========================================================================== +! ! ! +! ! MODULE i_PARAM +! ! ! +! ! ! Ice specific parameters +! ! ! +! ! USE o_PARAM +! ! IMPLICIT NONE +! ! SAVE +! ! ! ice model parameters: +! ! ! RHEOLOGY +! ! ! REAL(kind=WP) :: Pstar = 30000.0_WP ![N/m^2] +! ! ! REAL(kind=WP) :: ellipse =2.0_WP ! +! ! ! REAL(kind=WP) :: c_pressure =20.0_WP ! +! ! ! REAL(kind=WP) :: delta_min=1.0e-11 ! [s^(-1)] +! ! ! REAL(kind=WP) :: Clim_evp=615 ! kg/m^2 +! ! +! ! +! ! ! REAL(kind=WP) :: zeta_min=4.0e+8 ! kg/s +! ! ! INTEGER :: evp_rheol_steps=120 ! EVP rheology +! ! ! cybcycling steps +! ! ! 
REAL(kind=WP) :: ice_gamma_fct=0.25_WP ! smoothing parameter +! ! ! in ice fct advection +! ! ! REAL(kind=WP) :: ice_diff=10.0_WP ! diffusion to stabilize +! ! ! ice advection +! ! ! REAL(kind=WP) :: Tevp_inv +! ! ! real(kind=WP) :: theta_io=0.0_WP ! rotation angle +! ! ! (ice-ocean), available +! ! ! in EVP +! ! ! real(kind=WP) :: alpha_evp=250, beta_evp=250 +! ! +! ! +! ! ! real(kind=WP) :: c_aevp=0.15 ! 0.1--0.2, but should be adjusted experimentally +! ! ! Ice forcing averaging +! ! ! integer :: ice_ave_steps=1 !ice step=ice_ave_steps*oce_step +! ! ! real(kind=WP) :: cd_oce_ice = 5.5e-3 ! drag coef. oce - ice +! ! +! ! ! logical :: ice_free_slip=.false. +! ! ! integer :: whichEVP=0 !0=standart; 1=mEVP; 2=aEVP +! ! ! real(kind=WP) :: ice_dt !ice step=ice_ave_steps*oce_step +! ! +! ! ! NAMELIST /ice_dyn/ whichEVP, Pstar, ellipse, c_pressure, delta_min, evp_rheol_steps, Cd_oce_ice, & +! ! ! ice_gamma_fct, ice_diff, theta_io, ice_ave_steps, alpha_evp, beta_evp, c_aevp +! ! +! ! ! NAMELIST /ice_dyn/ whichEVP, Cd_oce_ice, & +! ! ! ice_ave_steps +! ! + +! ! !===================================================================== +! ! module i_therm_param +! ! USE o_PARAM +! ! implicit none +! ! REAL(kind=WP), parameter :: rhoair= 1.3 ! Air density, LY2004 !1.3 AOMIP +! ! REAL(kind=WP), parameter :: inv_rhoair= 1./1.3 ! Air density, LY2004 !1.3 AOMIP +! ! REAL(kind=WP), parameter :: rhowat= 1025. ! Water density +! ! REAL(kind=WP), parameter :: inv_rhowat= 1./1025. ! Inverse Water density +! ! REAL(kind=WP), parameter :: rhoice= 910. ! Ice density, AOMIP +! ! REAL(kind=WP), parameter :: inv_rhoice= 1./910. ! Ice density, AOMIP +! ! REAL(kind=WP), parameter :: rhosno= 290. ! Snow density, AOMIP +! ! REAL(kind=WP), parameter :: inv_rhosno= 1./290. ! Snow density, AOMIP +! ! +! ! REAL(kind=WP), parameter :: cpair=1005. ! Specific heat of air [J/(kg * K)] +! ! REAL(kind=WP), parameter :: cpice=2106. ! Specific heat of ice [J/(kg * K)] +! ! 
REAL(kind=WP), parameter :: cpsno=2090. ! Specific heat of snow [J/(kg * K)] +! ! REAL(kind=WP), parameter :: cc=rhowat*4190.0 ! Volumetr. heat cap. of water [J/m**3/K](cc = rhowat*cp_water) +! ! REAL(kind=WP), parameter :: cl=rhoice*3.34e5 ! Volumetr. latent heat of ice fusion [J/m**3](cl=rhoice*Lf) +! ! REAL(kind=WP), parameter :: clhw=2.501e6 ! Specific latent heat [J/kg]: water -> water vapor +! ! REAL(kind=WP), parameter :: clhi=2.835e6 ! sea ice-> water vapor +! ! +! ! REAL(kind=WP), parameter :: tmelt=273.15 cd ! 0 deg C expressed in K +! ! REAL(kind=WP), parameter :: boltzmann=5.67E-8 ! S. Boltzmann const.*longw. emissivity +! ! +! ! REAL(kind=WP) :: con = 2.1656 ! Thermal conductivities: ice; W/m/K +! ! REAL(kind=WP) :: consn = 0.31 ! snow +! ! +! ! REAL(kind=WP) :: Sice = 4.0 ! Ice salinity 3.2--5.0 ppt. +! ! +! ! integer :: iclasses=7 ! Number of ice thickness gradations for ice growth calcs. +! ! REAL(kind=WP) :: h0=1.0 ! Lead closing parameter [m] ! 0.5 +! ! +! ! REAL(kind=WP) :: hmin= 0.01 ! Cut-off ice thickness !! +! ! REAL(kind=WP) :: Armin=0.01 ! Minimum ice concentration !! +! ! +! ! REAL(kind=WP) :: emiss_ice=0.97 ! Emissivity of Snow/Ice, +! ! REAL(kind=WP) :: emiss_wat=0.97 ! Emissivity of open water +! ! +! ! REAL(kind=WP) :: albsn= 0.81 ! Albedo: frozen snow +! ! REAL(kind=WP) :: albsnm= 0.77 ! melting snow +! ! REAL(kind=WP) :: albi= 0.70 ! frozen ice +! ! REAL(kind=WP) :: albim= 0.68 ! melting ice +! ! REAL(kind=WP) :: albw= 0.066 ! open water, LY2004 +! ! +! ! NAMELIST /ice_therm/ Sice, h0, emiss_ice, & +! ! emiss_wat, albsn, albsnm, albi, albim, albw, con, consn +! ! +! ! end module i_therm_param +! ! 
!============================================================================== diff --git a/src/ice_oce_coupling.F90 b/src/ice_oce_coupling.F90 index 2e8bd57fc..3d46063f8 100755 --- a/src/ice_oce_coupling.F90 +++ b/src/ice_oce_coupling.F90 @@ -1,30 +1,92 @@ +module ocean2ice_interface + interface + subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module + +module oce_fluxes_interface + interface + subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module + ! ! !_______________________________________________________________________________ -subroutine oce_fluxes_mom(mesh) - ! transmits the relevant fields from the ice to the ocean model - ! +! 
transmits the relevant fields from the ice to the ocean model +subroutine oce_fluxes_mom(ice, dynamics, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH use o_PARAM use o_ARRAYS - use MOD_MESH - use i_ARRAYS - use g_PARSUP - use i_PARAM USE g_CONFIG use g_comm_auto - #if defined (__icepack) use icedrv_main, only: icepack_to_fesom #endif - implicit none - + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ integer :: n, elem, elnodes(3),n1 - real(kind=WP) :: aux, aux1 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - + real(kind=WP) :: aux + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice, a_ice, u_w, v_w + real(kind=WP), dimension(:), pointer :: stress_iceoce_x, stress_iceoce_y +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + stress_iceoce_x => ice%stress_iceoce_x(:) + stress_iceoce_y => ice%stress_iceoce_y(:) + ! ================== ! momentum flux: ! ================== @@ -34,7 +96,11 @@ subroutine oce_fluxes_mom(mesh) call icepack_to_fesom(nx_in=(myDim_nod2D+eDim_nod2D), & aice_out=a_ice) #endif + !___________________________________________________________________________ + ! compute total surface stress (iceoce+atmoce) on nodes +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, elem, elnodes, n1, aux) +!$OMP DO do n=1,myDim_nod2D+eDim_nod2D !_______________________________________________________________________ ! 
if cavity node skip it @@ -42,16 +108,21 @@ subroutine oce_fluxes_mom(mesh) !_______________________________________________________________________ if(a_ice(n)>0.001_WP) then - aux=sqrt((u_ice(n)-u_w(n))**2+(v_ice(n)-v_w(n))**2)*density_0*Cd_oce_ice + aux=sqrt((u_ice(n)-u_w(n))**2+(v_ice(n)-v_w(n))**2)*density_0*ice%cd_oce_ice stress_iceoce_x(n) = aux * (u_ice(n)-u_w(n)) stress_iceoce_y(n) = aux * (v_ice(n)-v_w(n)) else stress_iceoce_x(n)=0.0_WP stress_iceoce_y(n)=0.0_WP end if + + stress_node_surf(1,n) = stress_iceoce_x(n)*a_ice(n) + stress_atmoce_x(n)*(1.0_WP-a_ice(n)) + stress_node_surf(2,n) = stress_iceoce_y(n)*a_ice(n) + stress_atmoce_y(n)*(1.0_WP-a_ice(n)) end do - +!$OMP END DO !___________________________________________________________________________ + ! compute total surface stress (iceoce+atmoce) on elements +!$OMP DO DO elem=1,myDim_elem2D !_______________________________________________________________________ ! if cavity element skip it @@ -64,56 +135,81 @@ subroutine oce_fluxes_mom(mesh) stress_surf(2,elem)=sum(stress_iceoce_y(elnodes)*a_ice(elnodes) + & stress_atmoce_y(elnodes)*(1.0_WP-a_ice(elnodes)))/3.0_WP END DO - +!$OMP END DO +!$OMP END PARALLEL !___________________________________________________________________________ - if (use_cavity) call cavity_momentum_fluxes(mesh) + if (use_cavity) call cavity_momentum_fluxes(dynamics, partit, mesh) end subroutine oce_fluxes_mom ! ! !_______________________________________________________________________________ -subroutine ocean2ice(mesh) - - ! transmits the relevant fields from the ocean to the ice model - +! 
transmits the relevant fields from the ocean to the ice model +subroutine ocean2ice(ice, dynamics, tracers, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH use o_PARAM - use o_ARRAYS - use i_ARRAYS - use MOD_MESH - use g_PARSUP - USE g_CONFIG + use g_CONFIG use g_comm_auto implicit none - - type(t_mesh), intent(in) , target :: mesh + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh integer :: n, elem, k real(kind=WP) :: uw, vw, vol - -#include "associate_mesh.h" - + real(kind=WP), dimension(:,:) , pointer :: temp, salt + real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w, elevation +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + UV => dynamics%uv(:,:,:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + T_oc_array => ice%srfoce_temp(:) + S_oc_array => ice%srfoce_salt(:) + elevation => ice%srfoce_ssh(:) + + !___________________________________________________________________________ ! 
the arrays in the ice model are renamed - - if (ice_update) then +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, elem, k, uw, vw, vol) + if (ice%ice_update) then +!$OMP DO do n=1, myDim_nod2d+eDim_nod2d if (ulevels_nod2D(n)>1) cycle - T_oc_array(n) = tr_arr(1,n,1) - S_oc_array(n) = tr_arr(1,n,2) + T_oc_array(n) = temp(1,n) + S_oc_array(n) = salt(1,n) elevation(n) = hbar(n) end do +!$OMP END DO else +!$OMP DO do n=1, myDim_nod2d+eDim_nod2d if (ulevels_nod2D(n)>1) cycle - T_oc_array(n) = (T_oc_array(n)*real(ice_steps_since_upd,WP)+tr_arr(1,n,1))/real(ice_steps_since_upd+1,WP) - S_oc_array(n) = (S_oc_array(n)*real(ice_steps_since_upd,WP)+tr_arr(1,n,2))/real(ice_steps_since_upd+1,WP) - elevation(n) = (elevation(n) *real(ice_steps_since_upd,WP)+ hbar(n))/real(ice_steps_since_upd+1,WP) - !NR !PS elevation(n)=(elevation(n)*real(ice_steps_since_upd)+eta_n(n))/real(ice_steps_since_upd+1,WP) - !NR elevation(n)=(elevation(n)*real(ice_steps_since_upd)+hbar(n))/real(ice_steps_since_upd+1,WP) !PS + T_oc_array(n) = (T_oc_array(n)*real(ice%ice_steps_since_upd,WP)+temp(1,n))/real(ice%ice_steps_since_upd+1,WP) + S_oc_array(n) = (S_oc_array(n)*real(ice%ice_steps_since_upd,WP)+salt(1,n))/real(ice%ice_steps_since_upd+1,WP) + elevation(n) = (elevation(n) *real(ice%ice_steps_since_upd,WP)+ hbar(n))/real(ice%ice_steps_since_upd+1,WP) end do -!!PS elevation(:)= (elevation(:)*real(ice_steps_since_upd)+hbar(:))/real(ice_steps_since_upd+1,WP) +!$OMP END DO end if - - u_w = 0.0_WP - v_w = 0.0_WP + +!$OMP DO + do n=1, myDim_nod2d+eDim_nod2d + u_w(n) = 0.0_WP + v_w(n) = 0.0_WP + end do +!$OMP END DO + +!$OMP DO do n=1, myDim_nod2d if (ulevels_nod2D(n)>1) cycle uw = 0.0_WP @@ -122,59 +218,91 @@ subroutine ocean2ice(mesh) do k=1, nod_in_elem2D_num(n) elem=nod_in_elem2D(k,n) if (ulevels(elem)>1) cycle - !uw = uw+ UV(1,1,elem)*elem_area(elem) - !vw = vw+ UV(2,1,elem)*elem_area(elem) vol = vol + elem_area(elem) uw = uw+ UV(1,1,elem)*elem_area(elem) vw = vw+ UV(2,1,elem)*elem_area(elem) end do - !!PS uw = 
uw/area(1,n)/3.0_WP - !!PS vw = vw/area(1,n)/3.0_WP uw = uw/vol vw = vw/vol - if (ice_update) then + if (ice%ice_update) then u_w(n)=uw v_w(n)=vw else - u_w(n)=(u_w(n)*real(ice_steps_since_upd,WP)+uw)/real(ice_steps_since_upd+1,WP) - v_w(n)=(v_w(n)*real(ice_steps_since_upd,WP)+vw)/real(ice_steps_since_upd+1,WP) + u_w(n)=(u_w(n)*real(ice%ice_steps_since_upd,WP)+uw)/real(ice%ice_steps_since_upd+1,WP) + v_w(n)=(v_w(n)*real(ice%ice_steps_since_upd,WP)+vw)/real(ice%ice_steps_since_upd+1,WP) endif end do - call exchange_nod(u_w, v_w) +!$OMP END DO +!$OMP END PARALLEL + call exchange_nod(u_w, v_w, partit) end subroutine ocean2ice ! ! !_______________________________________________________________________________ -subroutine oce_fluxes(mesh) - - use MOD_MESH - USE g_CONFIG - use o_ARRAYS - use i_ARRAYS - use g_comm_auto - use g_forcing_param, only: use_virt_salt - use g_forcing_arrays - use g_PARSUP - use g_support - use i_therm_param - +subroutine oce_fluxes(ice, dynamics, tracers, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use g_CONFIG + use o_ARRAYS + use g_comm_auto + use g_forcing_param, only: use_virt_salt + use g_forcing_arrays + use g_support + use cavity_interfaces #if defined (__icepack) - use icedrv_main, only: icepack_to_fesom, & - init_flux_atm_ocn + use icedrv_main, only: icepack_to_fesom, & + init_flux_atm_ocn #endif - - implicit none - type(t_mesh), intent(in) , target :: mesh - integer :: n, elem, elnodes(3),n1 - real(kind=WP) :: rsss, net - real(kind=WP), allocatable :: flux(:) - -#include "associate_mesh.h" + use cavity_interfaces + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, elem, 
elnodes(3),n1 + real(kind=WP) :: rsss, net + real(kind=WP), allocatable :: flux(:) + !___________________________________________________________________________ + real(kind=WP), dimension(:,:), pointer :: temp, salt + real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: a_ice_old + real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + real(kind=WP) , pointer :: rhoice, rhosno, inv_rhowat +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + temp => tracers%data(1)%values(:,:) + salt => tracers%data(2)%values(:,:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + a_ice_old => ice%data(1)%values_old(:) + thdgr => ice%thermo%thdgr(:) + thdgrsn => ice%thermo%thdgrsn(:) + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + inv_rhowat => ice%thermo%inv_rhowat + !___________________________________________________________________________ allocate(flux(myDim_nod2D+eDim_nod2D)) - flux = 0.0_WP - + +!$OMP PARALLEL DO + do n=1, myDim_nod2d+eDim_nod2d + flux(n) = 0.0_WP + end do +!$OMP END PARALLEL DO + ! ================== ! heat and freshwater ! ================== @@ -208,17 +336,23 @@ subroutine oce_fluxes(mesh) call init_flux_atm_ocn() #else - heat_flux = -net_heat_flux - water_flux = -fresh_wa_flux -#endif - heat_flux_in=heat_flux ! 
sw_pene will change the heat_flux - - if (use_cavity) call cavity_heat_water_fluxes_3eq(mesh) - !!PS if (use_cavity) call cavity_heat_water_fluxes_2eq(mesh) - - !___________________________________________________________________________ - call exchange_nod(heat_flux, water_flux) +!$OMP PARALLEL DO + do n=1, myDim_nod2d+eDim_nod2d + heat_flux(n) = -net_heat_flux(n) + water_flux(n) = -fresh_wa_flux(n) + end do +!$OMP END PARALLEL DO +#endif +!$OMP PARALLEL DO + do n=1, myDim_nod2d+eDim_nod2d + heat_flux_in(n)=heat_flux(n) ! sw_pene will change the heat_flux + end do +!$OMP END PARALLEL DO + if (use_cavity) call cavity_heat_water_fluxes_3eq(ice, dynamics, tracers, partit, mesh) + !___________________________________________________________________________ + call exchange_nod(heat_flux, water_flux, partit) +!$OMP BARRIER !___________________________________________________________________________ ! on freshwater inflow/outflow or virtual salinity: ! 1. In zlevel & zstar the freshwater flux is applied in the update of the @@ -235,51 +369,75 @@ subroutine oce_fluxes(mesh) ! balance virtual salt flux if (use_virt_salt) then ! 
will remain zero otherwise rsss=ref_sss +!$OMP PARALLEL DO do n=1, myDim_nod2D+eDim_nod2D - !!PS if (ref_sss_local) rsss = tr_arr(1,n,2) - if (ref_sss_local) rsss = tr_arr(ulevels_nod2d(n),n,2) + if (ref_sss_local) rsss = salt(ulevels_nod2d(n),n) virtual_salt(n)=rsss*water_flux(n) end do - +!$OMP END PARALLEL DO if (use_cavity) then flux = virtual_salt where (ulevels_nod2d > 1) flux = 0.0_WP - call integrate_nod(flux, net, mesh) + call integrate_nod(flux, net, partit, mesh) else - call integrate_nod(virtual_salt, net, mesh) - end if - virtual_salt=virtual_salt-net/ocean_area + call integrate_nod(virtual_salt, net, partit, mesh) + end if +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + virtual_salt(n)=virtual_salt(n)-net/ocean_area + end do +!$OMP END PARALLEL DO end if - + +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + if (ulevels_nod2d(n) == 1) then + dens_flux(n)=sw_alpha(1,n) * heat_flux_in(n) / vcpw + sw_beta(1, n) * (relax_salt(n) + water_flux(n) * salt(1,n)) + else + dens_flux(n)=0.0_WP + end if + end do +!$OMP END PARALLEL DO !___________________________________________________________________________ ! balance SSS restoring to climatology - if (use_cavity) then + if (use_cavity) then do n=1, myDim_nod2D+eDim_nod2D relax_salt(n) = 0.0_WP - if (ulevels_nod2d(n)>1) cycle - !!PS relax_salt(n)=surf_relax_S*(Ssurf(n)-tr_arr(1,n,2)) - relax_salt(n)=surf_relax_S*(Ssurf(n)-tr_arr(ulevels_nod2d(n),n,2)) + if (ulevels_nod2d(n) > 1) cycle + relax_salt(n)=surf_relax_S*(Ssurf(n)-salt(ulevels_nod2d(n),n)) end do else +!$OMP PARALLEL DO do n=1, myDim_nod2D+eDim_nod2D - !!PS relax_salt(n)=surf_relax_S*(Ssurf(n)-tr_arr(1,n,2)) - relax_salt(n)=surf_relax_S*(Ssurf(n)-tr_arr(ulevels_nod2d(n),n,2)) + relax_salt(n)=surf_relax_S*(Ssurf(n)-salt(ulevels_nod2d(n),n)) end do +!$OMP END PARALLEL DO end if ! --> if use_cavity=.true. 
relax_salt anyway zero where is cavity see above - call integrate_nod(relax_salt, net, mesh) - relax_salt=relax_salt-net/ocean_area + call integrate_nod(relax_salt, net, partit, mesh) +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + relax_salt(n)=relax_salt(n)-net/ocean_area + end do +!$OMP END PARALLEL DO !___________________________________________________________________________ ! enforce the total freshwater/salt flux be zero ! 1. water flux ! if (.not. use_virt_salt) can be used! ! we conserve only the fluxes from the database plus evaporation. - flux = evaporation-ice_sublimation & ! the ice2atmos subplimation does not contribute to the freshwater flux into the ocean - +prec_rain & - +prec_snow*(1.0_WP-a_ice_old) & - +runoff - +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + flux(n) = evaporation(n) & + -ice_sublimation(n) & ! the ice2atmos subplimation does not contribute to the freshwater flux into the ocean + +prec_rain(n) & + +prec_snow(n)*(1.0_WP-a_ice_old(n)) & +#if defined (__oifs) + +residualifwflx(n) & ! balance residual ice flux only in coupled case +#endif + +runoff(n) + end do +!$OMP END PARALLEL DO ! --> In case of zlevel and zstar and levitating sea ice, sea ice is just sitting ! on top of the ocean without displacement of water, there the thermodynamic ! growth rates of sea ice have to be taken into account to preserve the fresh water @@ -290,27 +448,47 @@ subroutine oce_fluxes(mesh) ! salinity flux !!PS if ( .not. use_floatice .and. .not. use_virt_salt) then if (.not. use_virt_salt) then - flux = flux-thdgr*rhoice*inv_rhowat-thdgrsn*rhosno*inv_rhowat +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + flux(n) = flux(n)-thdgr(n)*rhoice*inv_rhowat-thdgrsn(n)*rhosno*inv_rhowat + end do +!$OMP END PARALLEL DO end if ! Also balance freshwater flux that come from ocean-cavity boundary if (use_cavity) then - if (.not. use_virt_salt) then + if (.not. use_virt_salt) then !zstar, zlevel ! 
only for full-free surface approach otherwise total ocean volume will drift where (ulevels_nod2d > 1) flux = -water_flux - else + else ! linfs where (ulevels_nod2d > 1) flux = 0.0_WP end if end if - - call integrate_nod(flux, net, mesh) + + ! compute total global net freshwater flux into the ocean + call integrate_nod(flux, net, partit, mesh) + + !___________________________________________________________________________ ! here the + sign must be used because we switched up the sign of the ! water_flux with water_flux = -fresh_wa_flux, but evap, prec_... and runoff still ! have there original sign - water_flux=water_flux+net/ocean_area + ! if use_cavity=.false. --> ocean_area == ocean_areawithcav + !! water_flux=water_flux+net/ocean_area + if (use_cavity) then + ! due to rigid lid approximation under the cavity we to not add freshwater + ! under the cavity for the freshwater balancing we do this only for the open + ! ocean + where (ulevels_nod2d == 1) water_flux=water_flux+net/ocean_area + else +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + water_flux(n)=water_flux(n)+net/ocean_area + end do +!$OMP END PARALLEL DO + end if !___________________________________________________________________________ - if (use_sw_pene) call cal_shortwave_rad(mesh) + if (use_sw_pene) call cal_shortwave_rad(ice, partit, mesh) !___________________________________________________________________________ deallocate(flux) diff --git a/src/ice_setup_step.F90 b/src/ice_setup_step.F90 index dbff0aa15..eab219213 100755 --- a/src/ice_setup_step.F90 +++ b/src/ice_setup_step.F90 @@ -1,211 +1,165 @@ -module ice_setup_step_interfaces - interface - subroutine ice_array_setup(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine +module ice_initial_state_interface + interface + subroutine ice_initial_state(ice, tracers, partit, mesh) + USE MOD_ICE + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + 
type(t_tracer), intent(in) , target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module - subroutine ice_initial_state(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface +module ice_setup_interface + interface + subroutine ice_setup(ice, tracers, partit, mesh) + USE MOD_ICE + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_tracer), intent(in) , target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module + +module ice_timestep_interface + interface + subroutine ice_timestep(istep, ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + integer , intent(in) :: istep + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module -! ! !_______________________________________________________________________________ ! ice initialization + array allocation + time stepping -subroutine ice_setup(mesh) +subroutine ice_setup(ice, tracers, partit, mesh) + USE MOD_ICE + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH use o_param - use g_parsup - use i_param - use i_arrays use g_CONFIG - use mod_mesh - use ice_setup_step_interfaces + use ice_initial_state_interface + use ice_fct_interfaces implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_ice) , intent(inout), target :: ice + type(t_tracer), intent(in) , target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit - ! ================ DO not change - ice_dt=real(ice_ave_steps,WP)*dt + !___________________________________________________________________________ + ! 
initialise ice derived type + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_init'//achar(27)//'[0m' + call ice_init(ice, partit, mesh) + + !___________________________________________________________________________ + ! DO not change + ice%ice_dt = real(ice%ice_ave_steps,WP)*dt ! ice_dt=dt - Tevp_inv=3.0_WP/ice_dt - Clim_evp=Clim_evp*(evp_rheol_steps/ice_dt)**2/Tevp_inv ! This is combination - ! it always enters - - ! ================ - call ice_array_setup(mesh) - call ice_fct_init(mesh) - ! ================ + ice%Tevp_inv = 3.0_WP/ice%ice_dt + ! This is combination it always enters + ice%Clim_evp = ice%Clim_evp*(ice%evp_rheol_steps/ice%ice_dt)**2/ice%Tevp_inv + + !___________________________________________________________________________ + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_fct_init'//achar(27)//'[0m' + call ice_mass_matrix_fill(ice, partit, mesh) + + !___________________________________________________________________________ ! Initialization routine, user input is required - ! ================ !call ice_init_fields_test - call ice_initial_state(mesh) ! Use it unless running test example - if(mype==0) write(*,*) 'Ice is initialized' + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call ice_initial_state'//achar(27)//'[0m' + call ice_initial_state(ice, tracers, partit, mesh) ! Use it unless running test example + + if(partit%mype==0) write(*,*) 'Ice is initialized' end subroutine ice_setup ! ! !_______________________________________________________________________________ -subroutine ice_array_setup(mesh) -! -! inializing sea ice model -! -! Variables that serve for exchange with atmosphere are nodal, to keep -! 
back compatibility with FESOM input routines - -use o_param -use i_param -use MOD_MESH -use i_arrays -use g_parsup -USE g_CONFIG - -implicit none -type(t_mesh), intent(in) , target :: mesh -integer :: n_size, e_size, mn, k, n, n1, n2 - -#include "associate_mesh.h" - -n_size=myDim_nod2D+eDim_nod2D -e_size=myDim_elem2D+eDim_elem2D - -! Allocate memory for variables of ice model - allocate(u_ice(n_size), v_ice(n_size)) - allocate(U_rhs_ice(n_size), V_rhs_ice(n_size)) - allocate(sigma11(e_size), sigma12(e_size), sigma22(e_size)) - allocate(eps11(e_size), eps12(e_size), eps22(e_size)) - allocate(m_ice(n_size), a_ice(n_size), m_snow(n_size)) - allocate(rhs_m(n_size), rhs_a(n_size), rhs_ms(n_size)) - allocate(t_skin(n_size)) - allocate(U_ice_old(n_size), V_ice_old(n_size)) !PS - allocate(m_ice_old(n_size), a_ice_old(n_size), m_snow_old(n_size), thdgr_old(n_size)) !PS - if (whichEVP > 0) then - allocate(u_ice_aux(n_size), v_ice_aux(n_size)) - allocate(alpha_evp_array(myDim_elem2D)) - allocate(beta_evp_array(n_size)) - - alpha_evp_array=alpha_evp - beta_evp_array =alpha_evp ! alpha=beta works most reliable - u_ice_aux=0.0_WP - v_ice_aux=0.0_WP - end if - - allocate(rhs_mdiv(n_size), rhs_adiv(n_size), rhs_msdiv(n_size)) - - m_ice_old=0.0_WP !PS - a_ice_old=0.0_WP !PS - m_snow_old=0.0_WP !PS - thdgr_old=0.0_WP !PS - U_ice_old=0.0_WP !PS - V_ice_old=0.0_WP !PS - - rhs_m=0.0_WP - rhs_ms=0.0_WP - rhs_a=0.0_WP - m_ice=0.0_WP - a_ice=0.0_WP - m_snow=0.0_WP - U_rhs_ice=0.0_WP - V_rhs_ice=0.0_WP - U_ice=0.0_WP - V_ice=0.0_WP - sigma11=0.0_WP - sigma22=0.0_WP - sigma12=0.0_WP - eps11=0.0_WP - eps12=0.0_WP - eps22=0.0_WP - t_skin=0.0_WP - rhs_mdiv=0.0_WP - rhs_adiv=0.0_WP - rhs_msdiv=0.0_WP - - -! Allocate memory for arrays used in coupling -! with ocean and atmosphere - allocate(S_oc_array(n_size), T_oc_array(n_size)) ! 
copies of ocean T ans S - S_oc_array = 0.0_WP - T_oc_array = 0.0_WP - allocate(fresh_wa_flux(n_size), net_heat_flux(n_size)) - fresh_wa_flux = 0.0_WP - net_heat_flux = 0.0_WP - allocate(stress_atmice_x(n_size), stress_atmice_y(n_size)) - stress_atmice_x = 0.0_WP - stress_atmice_y = 0.0_WP - allocate(elevation(n_size)) ! =ssh of ocean - elevation = 0.0_WP - allocate(stress_iceoce_x(n_size), stress_iceoce_y(n_size)) - stress_iceoce_x = 0.0_WP - stress_iceoce_y = 0.0_WP - allocate(U_w(n_size), V_w(n_size)) ! =uf and vf of ocean at surface nodes -#if defined (__oasis) - allocate(oce_heat_flux(n_size), ice_heat_flux(n_size)) - allocate(tmp_oce_heat_flux(n_size), tmp_ice_heat_flux(n_size)) -#if defined (__oifs) - allocate(ice_alb(n_size), ice_temp(n_size), enthalpyoffuse(n_size)) - allocate(rhs_tempdiv(n_size), rhs_temp(n_size)) - ice_alb=0.6_WP - ice_temp=265.15_WP - rhs_tempdiv=0._WP - rhs_temp=0._WP - enthalpyoffuse=0._WP -#endif /* (__oifs) */ - oce_heat_flux=0._WP - ice_heat_flux=0._WP - tmp_oce_heat_flux=0._WP - tmp_ice_heat_flux=0._WP -#endif /* (__oasis) */ -end subroutine ice_array_setup -! -! -! -!_______________________________________________________________________________ ! 
Sea ice model step -subroutine ice_timestep(step, mesh) -use i_arrays -use o_param -use g_parsup -use g_CONFIG -use i_PARAM, only: whichEVP -use mod_mesh - +subroutine ice_timestep(step, ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use g_CONFIG + use ice_EVPdynamics_interface + use ice_maEVPdynamics_interface + use ice_fct_interfaces + use ice_thermodynamics_interfaces + use cavity_interfaces #if defined (__icepack) use icedrv_main, only: step_icepack #endif - -implicit none -type(t_mesh), intent(in) , target :: mesh -integer :: step,i -REAL(kind=WP) :: t0,t1, t2, t3 - + implicit none + integer , intent(in) :: step + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: i + REAL(kind=WP) :: t0,t1, t2, t3 #if defined (__icepack) -real(kind=WP) :: time_evp, time_advec, time_therm + real(kind=WP) :: time_evp, time_advec, time_therm #endif - -t0=MPI_Wtime() - + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp, a_ice +#endif +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) +#if defined (__oifs) || defined (__ifsinterface) + a_ice => ice%data(1)%values(:) + ice_temp => ice%data(4)%values(:) +#endif + !___________________________________________________________________________ + t0=MPI_Wtime() #if defined (__icepack) - call step_icepack(mesh, time_evp, time_advec, time_therm) ! EVP, advection and thermodynamic parts + call step_icepack(ice, mesh, time_evp, time_advec, time_therm) ! 
EVP, advection and thermodynamic parts #else !___________________________________________________________________________ ! ===== Dynamics - if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics...'//achar(27)//'[0m' - SELECT CASE (whichEVP) + SELECT CASE (ice%whichEVP) CASE (0) - call EVPdynamics(mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics...'//achar(27)//'[0m' + call EVPdynamics (ice, partit, mesh) CASE (1) - call EVPdynamics_m(mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics_m...'//achar(27)//'[0m' + call EVPdynamics_m(ice, partit, mesh) CASE (2) - call EVPdynamics_a(mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call EVPdynamics_a...'//achar(27)//'[0m' + call EVPdynamics_a(ice, partit, mesh) CASE DEFAULT if (mype==0) write(*,*) 'a non existing EVP scheme specified!' - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop END SELECT - if (use_cavity) call cavity_ice_clean_vel(mesh) + if (use_cavity) call cavity_ice_clean_vel(ice, partit, mesh) t1=MPI_Wtime() !___________________________________________________________________________ @@ -215,73 +169,110 @@ subroutine ice_timestep(step, mesh) ! call ice_fct_solve ! call cut_off ! new FCT routines from Sergey Danilov 08.05.2018 -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) +!$OMP PARALLEL DO do i=1,myDim_nod2D+eDim_nod2D ice_temp(i) = ice_temp(i)*a_ice(i) end do +!$OMP END PARALLEL DO #endif /* (__oifs) */ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_TG_rhs_div...'//achar(27)//'[0m' - call ice_TG_rhs_div(mesh) + call ice_TG_rhs_div (ice, partit, mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call ice_fct_solve...'//achar(27)//'[0m' - call ice_fct_solve(mesh) + call ice_fct_solve (ice, partit, mesh) + if (flag_debug .and. 
mype==0) print *, achar(27)//'[36m'//' --> call ice_update_for_div...'//achar(27)//'[0m' - call ice_update_for_div(mesh) -#if defined (__oifs) + call ice_update_for_div(ice, partit, mesh) + +#if defined (__oifs) || defined (__ifsinterface) +!$OMP PARALLEL DO do i=1,myDim_nod2D+eDim_nod2D - if (a_ice(i)>0.0_WP) ice_temp(i) = ice_temp(i)/a_ice(i) + if (a_ice(i)>0.0_WP) ice_temp(i) = ice_temp(i)/max(a_ice(i), 1.e-6_WP) end do +!$OMP END PARALLEL DO #endif /* (__oifs) */ + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call cut_off...'//achar(27)//'[0m' - call cut_off + call cut_off(ice, partit, mesh) - if (use_cavity) call cavity_ice_clean_ma(mesh) + if (use_cavity) call cavity_ice_clean_ma(ice, partit, mesh) t2=MPI_Wtime() !___________________________________________________________________________ ! ===== Thermodynamic part if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call thermodynamics...'//achar(27)//'[0m' - call thermodynamics(mesh) + call thermodynamics(ice, partit, mesh) #endif /* (__icepack) */ + + !___________________________________________________________________________ +!$OMP PARALLEL DO + do i=1,myDim_nod2D+eDim_nod2D + if ( ( U_ice(i)/=0.0_WP .and. mesh%ulevels_nod2d(i)>1) .or. (V_ice(i)/=0.0_WP .and. mesh%ulevels_nod2d(i)>1) ) then + write(*,*) " --> found cavity velocity /= 0.0_WP , ", mype + write(*,*) " ulevels_nod2d(n) = ", mesh%ulevels_nod2d(i) + write(*,*) " U_ice(n) = ", U_ice(i) + write(*,*) " V_ice(n) = ", V_ice(i) + write(*,*) + end if + end do +!$OMP END PARALLEL DO t3=MPI_Wtime() rtime_ice = rtime_ice + (t3-t0) rtime_tot = rtime_tot + (t3-t0) if(mod(step,logfile_outfreq)==0 .and. mype==0) then - write(*,*) '___ICE STEP EXECUTION TIMES____________________________' + write(*,*) '___ICE STEP EXECUTION TIMES____________________________' #if defined (__icepack) - write(*,"(A, ES10.3)") ' Ice Dyn. :', time_evp + write(*,"(A, ES10.3)") ' Ice Dyn. :', time_evp write(*,"(A, ES10.3)") ' Ice Advect. 
:', time_advec write(*,"(A, ES10.3)") ' Ice Thermodyn. :', time_therm #else - write(*,"(A, ES10.3)") ' Ice Dyn. :', t1-t0 - write(*,"(A, ES10.3)") ' Ice Advect. :', t2-t1 - write(*,"(A, ES10.3)") ' Ice Thermodyn. :', t3-t2 + write(*,"(A, ES10.3)") ' Ice Dyn. :', t1-t0 + write(*,"(A, ES10.3)") ' Ice Advect. :', t2-t1 + write(*,"(A, ES10.3)") ' Ice Thermodyn. :', t3-t2 #endif /* (__icepack) */ - write(*,*) ' _______________________________' - write(*,"(A, ES10.3)") ' Ice TOTAL :', t3-t0 - write(*,*) + write(*,*) ' _______________________________' + write(*,"(A, ES10.3)") ' Ice TOTAL :', t3-t0 + write(*,*) endif - end subroutine ice_timestep ! ! !_______________________________________________________________________________ ! sets inital values or reads restart file for ice model -subroutine ice_initial_state(mesh) - use i_ARRAYs - use MOD_MESH +subroutine ice_initial_state(ice, tracers, partit, mesh) + USE MOD_ICE + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH use o_PARAM use o_arrays - use g_parsup use g_CONFIG implicit none - ! - type(t_mesh), intent(in) , target :: mesh - integer :: i - character(MAX_PATH) :: filename - real(kind=WP), external :: TFrez ! Sea water freeze temperature. - -#include "associate_mesh.h" - + type(t_ice) , intent(inout), target :: ice + type(t_tracer), intent(in) , target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: i + character(MAX_PATH) :: filename + real(kind=WP), external :: TFrez ! Sea water freeze temperature. + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:), pointer :: u_ice, v_ice +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + + !___________________________________________________________________________ m_ice =0._WP a_ice =0._WP u_ice =0._WP @@ -292,10 +283,14 @@ subroutine ice_initial_state(mesh) do i=1,myDim_nod2D+eDim_nod2D !_______________________________________________________________________ - if (ulevels_nod2d(i)>1) cycle ! --> if cavity, no sea ice, no initial state + if (ulevels_nod2d(i)>1) then + !!PS m_ice(i) = 1.0e15_WP + !!PS m_snow(i) = 0.1e15_WP + cycle ! --> if cavity, no sea ice, no initial state + endif !_______________________________________________________________________ - if (tr_arr(1,i,1)< 0.0_WP) then + if (tracers%data(1)%values(1,i)< 0.0_WP) then if (geo_coord_nod2D(2,i)>0._WP) then m_ice(i) = 1.0_WP m_snow(i)= 0.1_WP diff --git a/src/ice_thermo_cpl.F90 b/src/ice_thermo_cpl.F90 index 57085894a..9cc3e4e45 100644 --- a/src/ice_thermo_cpl.F90 +++ b/src/ice_thermo_cpl.F90 @@ -1,5 +1,5 @@ -#if defined (__oasis) -subroutine thermodynamics(mesh) +#if defined (__oasis) || defined (__ifsinterface) +subroutine thermodynamics(ice, partit, mesh) !=================================================================== ! @@ -14,59 +14,22 @@ subroutine thermodynamics(mesh) ! Wolfgang Dorn (AWI), Oct-2012 (h0min adapted) ! 
!=================================================================== - !---- variables from oce_modules.F90 -#if 0 - use o_param, only: ref_sss, ref_sss_local -#ifdef use_fullfreesurf - use o_array, only: real_salt_flux -#endif - use g_parsup, only: myDim_nod2D, eDim_nod2D -#ifdef use_cavity - use o_mesh, only: coord_nod2D, ulevels_nod2D -#else - use o_mesh, only: coord_nod2D -#endif - - !---- variables from ice_modules.F90 - use i_dyn_parms, only: Cd_oce_ice - use i_therm_parms, only: rhowat, rhoice, rhosno, cc, cl, con, consn, Sice -#ifdef oifs - use i_array, only: a_ice, m_ice, m_snow, u_ice, v_ice, u_w, v_w & - , fresh_wa_flux, net_heat_flux, oce_heat_flux, ice_heat_flux, enthalpyoffuse, S_oc_array, T_oc_array -#else - use i_array, only: a_ice, m_ice, m_snow, u_ice, v_ice, u_w, v_w & - , fresh_wa_flux, net_heat_flux, oce_heat_flux, ice_heat_flux, S_oc_array, T_oc_array -#endif - - !---- variables from gen_modules_config.F90 - use g_config, only: dt - - !---- variables from gen_modules_forcing.F90 -#ifdef oifs - use g_forcing_arrays, only: shortwave, evap_no_ifrac, sublimation & - , prec_rain, prec_snow, runoff, evaporation, thdgr, thdgrsn, flice & - , enthalpyoffuse -#else - use g_forcing_arrays, only: shortwave, evap_no_ifrac, sublimation & - , prec_rain, prec_snow, runoff, evaporation, thdgr, thdgrsn, flice -#endif - !---- variables from gen_modules_rotate_grid.F90 - use g_rotate_grid, only: r2g -#endif use o_param - use mod_mesh - use i_therm_param - use i_param - use i_arrays + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH use g_config use g_forcing_param use g_forcing_arrays - use g_parsup use g_comm_auto use g_rotate_grid implicit none - + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !_____________________________________________________________________________ integer :: inod !---- prognostic variables (updated in `ice_growth') real(kind=WP) :: 
A, h, hsn, alb, t @@ -74,6 +37,8 @@ subroutine thermodynamics(mesh) real(kind=WP) :: a2ohf, a2ihf !---- evaporation and sublimation (provided by ECHAM) real(kind=WP) :: evap, subli + !---- add residual freshwater flux over ice to freshwater (setted in ice_growth) + real(kind=WP) :: resid !---- precipitation and runoff (provided by ECHAM) real(kind=WP) :: rain, snow, runo !---- ocean variables (provided by FESOM) @@ -86,30 +51,83 @@ subroutine thermodynamics(mesh) !---- geographical coordinates real(kind=WP) :: geolon, geolat !---- minimum and maximum of the lead closing parameter - real(kind=WP) :: h0min = 0.50, h0max = 1.5 - type(t_mesh), intent(in) , target :: mesh + real(kind=WP) :: h0min = 0.5, h0max = 1.5 real(kind=WP), parameter :: Aimin = 0.001, himin = 0.005 -#include "associate_mesh.h" - + !_____________________________________________________________________________ + ! pointer on necessary derived types + integer , pointer :: myDim_nod2D, eDim_nod2D + integer , dimension(:) , pointer :: ulevels_nod2D + real(kind=WP), dimension(:,:), pointer :: geo_coord_nod2D + real(kind=WP), dimension(:) , pointer :: u_ice, v_ice + real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn + real(kind=WP), dimension(:) , pointer :: a_ice_old, m_ice_old, m_snow_old, thdgr_old + real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:) , pointer :: ice_temp, ice_alb, enthalpyoffuse +#endif +#if defined (__oasis) || defined (__ifsinterface) + real(kind=WP), dimension(:) , pointer :: oce_heat_flux, ice_heat_flux +#endif + real(kind=WP) , pointer :: rhoice, rhosno, rhowat, Sice, cl, cc, cpice, consn, con + myDim_nod2d=>partit%myDim_nod2D + eDim_nod2D =>partit%eDim_nod2D + ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D + 
geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D(:,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + thdgr => ice%thermo%thdgr(:) + thdgrsn => ice%thermo%thdgrsn(:) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) + thdgr_old => ice%thermo%thdgr_old + T_oc_array => ice%srfoce_temp(:) + S_oc_array => ice%srfoce_salt(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + fresh_wa_flux => ice%flx_fw(:) + net_heat_flux => ice%flx_h(:) +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) + ice_alb => ice%atmcoupl%ice_alb(:) + enthalpyoffuse=> ice%atmcoupl%enthalpyoffuse(:) +#endif +#if defined (__oasis) || defined (__ifsinterface) + oce_heat_flux => ice%atmcoupl%oce_flx_h(:) + ice_heat_flux => ice%atmcoupl%ice_flx_h(:) +#endif + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + rhowat => ice%thermo%rhowat + Sice => ice%thermo%Sice + cl => ice%thermo%cl + cc => ice%thermo%cc + cpice => ice%thermo%cpice + consn => ice%thermo%consn + con => ice%thermo%con + rhoice => ice%thermo%rhoice + + + !_____________________________________________________________________________ rsss = ref_sss - !---- total evaporation (needed in oce_salt_balance.F90) - evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice - !---- loop over all surface node - do inod=1,myDim_nod2d+eDim_nod2d + do inod=1,myDim_nod2d+eDim_nod2D -#ifdef use_cavity if (ulevels_nod2D(inod) > 1) cycle -#endif A = a_ice(inod) h = m_ice(inod) hsn = m_snow(inod) -#if defined (__oifs) +#if defined (__oifs) || defined (__ifsinterface) a2ohf = oce_heat_flux(inod) + shortwave(inod) + enthalpyoffuse(inod) #else a2ohf = oce_heat_flux(inod) + shortwave(inod) @@ -121,7 +139,7 @@ subroutine thermodynamics(mesh) snow = prec_snow(inod) runo = runoff(inod) - ustar = 
sqrt(Cd_oce_ice)*sqrt((u_ice(inod)-u_w(inod))**2+(v_ice(inod)-v_w(inod))**2) + ustar = sqrt(ice%cd_oce_ice)*sqrt((u_ice(inod)-u_w(inod))**2+(v_ice(inod)-v_w(inod))**2) T_oc = T_oc_array(inod) S_oc = S_oc_array(inod) if (ref_sss_local) rsss = S_oc @@ -132,48 +150,54 @@ subroutine thermodynamics(mesh) rsf = 0._WP end if -#if defined (__oifs) - !---- different lead closing parameter for NH and SH - call r2g(geolon, geolat, coord_nod2d(1,inod), coord_nod2d(2,inod)) - if (geolat.lt.0.) then - h0min = 1.0 - h0max = 1.0 - else - h0min = 0.3 - h0max = 0.3 - endif -#endif /* (__oifs) */ +#if defined (__oifs) || defined (__ifsinterface) - call ice_growth -#if defined (__oifs) !---- For AWI-CM3 we calculate ice surface temp and albedo in fesom, ! then send those to OpenIFS where they are used to calucate the ! energy fluxes ---! t = ice_temp(inod) if(A>Aimin) then - call ice_surftemp(max(h/(max(A,Aimin)),0.05),hsn/(max(A,Aimin)),a2ihf,t) + call ice_surftemp(ice%thermo, max(h/(max(A,Aimin)),0.05), hsn/(max(A,Aimin)), a2ihf, t) ice_temp(inod) = t else ! Freezing temp of saltwater in K ice_temp(inod) = -0.0575_WP*S_oc_array(inod) + 1.7105e-3_WP*sqrt(S_oc_array(inod)**3) -2.155e-4_WP*(S_oc_array(inod)**2)+273.15_WP endif - call ice_albedo(h,hsn,t,alb) + call ice_albedo(ice%thermo, h, hsn, t, alb) ice_alb(inod) = alb #endif + call ice_growth - - a_ice(inod) = A - m_ice(inod) = h - m_snow(inod) = hsn - net_heat_flux(inod) = ehf - fresh_wa_flux(inod) = fw + !__________________________________________________________________________ + ! save old ice variables + m_ice_old(inod) = m_ice(inod) + m_snow_old(inod) = m_snow(inod) + a_ice_old(inod) = a_ice(inod) + thdgr_old(inod) = thdgr(inod) + + !__________________________________________________________________________ + ! save new ice variables + a_ice(inod) = A + m_ice(inod) = h + m_snow(inod) = hsn + net_heat_flux(inod) = ehf + fresh_wa_flux(inod) = fw if (.not. 
use_virt_salt) then real_salt_flux(inod)= rsf end if - thdgr(inod) = dhgrowth - thdgrsn(inod) = dhsngrowth - flice(inod) = dhflice - + thdgr(inod) = dhgrowth + thdgrsn(inod) = dhsngrowth + flice(inod) = dhflice + + !---- total evaporation (needed in oce_salt_balance.F90) = evap+subli + evaporation(inod) = evap + subli + ice_sublimation(inod)= subli + prec_rain(inod) = rain + prec_snow(inod) = snow + runoff(inod) = runo +#if defined (__oifs) + residualifwflx(inod) = resid +#endif enddo return @@ -184,9 +208,9 @@ subroutine thermodynamics(mesh) !=================================================================== subroutine ice_growth - + implicit none - + !---- thermodynamic production rates (pos.: growth; neg.: melting) real(kind=WP) :: dsnow, dslat, dhice, dhiow, dcice, dciow @@ -269,8 +293,10 @@ subroutine ice_growth !---- NOTE: evaporation and sublimation represent potential fluxes and !---- must be area-weighted (like the heat fluxes); in contrast, !---- precipitation (snow and rain) and runoff are effective fluxes - PmEice = A*snow + A*subli - PmEocn = rain + runo + (1._WP-A)*snow + (1._WP-A)*evap + subli = A*subli + evap = (1._WP-A)*evap + PmEice = A*snow + subli + PmEocn = evap + rain + (1._WP-A)*snow + runo !---- convert freshwater fluxes [m/s] into growth per time step dt [m] PmEice = PmEice*dt @@ -297,7 +323,8 @@ subroutine ice_growth else PmEice = 0._WP endif - + resid = PmEice/dt + !---- add residual freshwater flux over ice to freshwater flux over ocean PmEocn = PmEocn + PmEice PmEice = 0._WP @@ -309,8 +336,19 @@ subroutine ice_growth !---- snow melt rate over sea ice (dsnow <= 0) !---- if there is atmospheric melting over sea ice, first melt any !---- snow that is present, but do not melt more snow than available +#if defined (__oifs) || defined (__ifsinterface) + !---- new condition added - surface temperature must be + !---- larger than 273K to melt snow + if (t.gt.273_WP) then + dsnow = A*min(Qatmice-Qicecon,0._WP) + dsnow = 
max(dsnow*rhoice/rhosno,-hsn) + else + dsnow = 0.0_WP + endif +#else dsnow = A*min(Qatmice-Qicecon,0._WP) dsnow = max(dsnow*rhoice/rhosno,-hsn) +#endif !---- update snow thickness after atmospheric snow melt hsn = hsn + dsnow @@ -401,7 +439,7 @@ subroutine ice_growth !---- total freshwater mass flux into the ocean [kg/m**2/s] if (.not. use_virt_salt) then - fw = PmEocn*rhofwt - dhgrowth*rhoice - dhsngrowth*rhosno + fw = PmEocn*rhofwt - dhgrowth*rhoice - dhsngrowth*rhosno rsf = -dhgrowth*rhoice*Sice/rhowat else fw = PmEocn*rhofwt - dhgrowth*rhoice*(rsss-Sice)/rsss - dhsngrowth*rhosno @@ -438,14 +476,21 @@ subroutine ice_growth end if !---- convert freshwater mass flux [kg/m**2/s] into sea-water volume flux [m/s] - fw = fw/rhowat + fw = fw/rhowat + evap = evap *rhofwt/rhowat + rain = rain *rhofwt/rhowat + snow = snow *rhofwt/rhowat + runo = runo *rhofwt/rhowat + subli= subli*rhofwt/rhowat + resid= resid*rhofwt/rhowat + return end subroutine ice_growth - subroutine ice_surftemp(h,hsn,a2ihf,t) + subroutine ice_surftemp(ithermp, h,hsn,a2ihf,t) ! INPUT: ! a2ihf - Total atmo heat flux to ice ! A - Ice fraction @@ -455,9 +500,8 @@ subroutine ice_surftemp(h,hsn,a2ihf,t) ! INPUT/OUTPUT: ! t - Ice surface temperature - use i_therm_param implicit none - + type(t_ice_thermo), intent(in), target :: ithermp !---- atmospheric heat net flux into to ice (provided by OpenIFS) real(kind=WP) a2ihf !---- ocean variables (provided by FESOM) @@ -473,9 +517,16 @@ subroutine ice_surftemp(h,hsn,a2ihf,t) real(kind=WP) zcpdte real(kind=WP) zcprosn !---- local parameters - real(kind=WP), parameter :: dice = 0.05_WP ! ECHAM6's thickness for top ice "layer" + real(kind=WP), parameter :: dice = 0.10_WP ! 
Thickness for top ice "layer" !---- freezing temperature of sea-water [K] real(kind=WP) :: TFrezs + + real(kind=WP), pointer :: con, consn, cpsno, rhoice, rhosno + con => ice%thermo%con + consn => ice%thermo%consn + cpsno => ice%thermo%cpsno + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno !---- compute freezing temperature of sea-water from salinity TFrezs = -0.0575_WP*S_oc + 1.7105e-3_WP*sqrt(S_oc**3) - 2.155e-4_WP*(S_oc**2)+273.15 @@ -488,38 +539,44 @@ subroutine ice_surftemp(h,hsn,a2ihf,t) zcprosn=rhosno*cpsno/dt ! Specific Energy required to change temperature of 1m snow on ice [J/(sm³K)] zcpdte=zcpdt+zcprosn*hsn ! Combined Energy required to change temperature of snow + 0.05m of upper ice t=(zcpdte*t+a2ihf+zicefl)/(zcpdte+con/zsniced) ! New sea ice surf temp [K] - t=min(TFrezs,t) ! Not warmer than freezing please! + t=min(273.15_WP,t) end subroutine ice_surftemp - subroutine ice_albedo(h,hsn,t,alb) + subroutine ice_albedo(ithermp, h, hsn, t, alb) ! INPUT: - ! hsn - snow thickness, used for albedo parameterization [m] - ! t - temperature of snow/ice surface [C] + ! h - ice thickness [m] + ! hsn - snow thickness [m] + ! t - temperature of snow/ice surface [C] ! ! OUTPUT: - ! alb - snow albedo - use i_therm_param + ! alb - selected broadband albedo implicit none - - real(kind=WP) h - real(kind=WP) hsn - real(kind=WP) t - real(kind=WP) alb - + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) :: h + real(kind=WP) :: hsn + real(kind=WP) :: t + real(kind=WP) :: alb + real(kind=WP) :: geolat + real(kind=WP), pointer :: albsn, albi, albsnm, albim + albsn => ice%thermo%albsn + albi => ice%thermo%albi + albsnm => ice%thermo%albsnm + albim => ice%thermo%albim + ! set albedo ! ice and snow, freezing and melting conditions are distinguished if (h>0.0_WP) then if (t<273.15_WP) then ! freezing condition - if (hsn.gt.0.0_WP) then ! snow cover present - alb=albsn + if (hsn.gt.0.001_WP) then ! snow cover present + alb=albsn else ! 
no snow cover - alb=albi + alb=albi endif else ! melting condition - if (hsn.gt.0.0_WP) then ! snow cover present - alb=albsnm + if (hsn.gt.0.001_WP) then ! snow cover present + alb=albsnm else ! no snow cover - alb=albim + alb=albim endif endif else diff --git a/src/ice_thermo_oce.F90 b/src/ice_thermo_oce.F90 index a6fef3ea1..6cfdd0641 100755 --- a/src/ice_thermo_oce.F90 +++ b/src/ice_thermo_oce.F90 @@ -1,46 +1,137 @@ -!=================================================================== -subroutine cut_off() -use o_param -use i_arrays -implicit none - -where(a_ice>1.0_WP) - a_ice=1.0_WP -end where - -where(a_ice<0.1e-8_WP) - a_ice=0.0_WP -#if defined (__oifs) - m_ice=0.0_WP - m_snow=0.0_WP - ice_temp=273.15_WP +module ice_thermodynamics_interfaces + interface + subroutine thermodynamics(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + + subroutine cut_off(ice, partit, mesh) + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module + +module ice_therm_interface + interface + subroutine therm_ice(ithermp, h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & + ug,ustar,T_oc,S_oc,H_ML,t,ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & + rsf, dhgrowth, dhsngrowth, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) + USE MOD_ICE + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) h, hsn, A, fsh, flo, Ta, qa, rain, snow, runo, rsss, evap_in, & + ug, ustar, T_oc, S_oc, H_ML, t, ice_dt, ch, ce, ch_i, ce_i, fw, ehf, & + dhgrowth, dhsngrowth, ahf, prec, subli, subli_i, rsf, & + rhow, show, rhice, shice, sh, thick, thact, lat, & + rh, rA, qhst, sn, hsntmp, o2ihf, evap, iflice, hflatow, & + 
hfsenow, hflwrdout, lid_clo + end subroutine + end interface +end module + +module ice_budget_interfaces + interface + subroutine budget(ithermp, hice, hsn, t, ta, qa, fsh, flo, ug, S_oc, ch_i, ce_i, fh, subli) + USE MOD_ICE + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) hice, hsn, t, ta, qa, fsh, flo, ug, S_oc, ch_i, ce_i, fh, subli + end subroutine + + subroutine obudget(ithermp, qa, fsh, flo, t, ug, ta, ch, ce, fh, evap, hflatow, hfsenow, hflwrdout) + USE MOD_ICE + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) qa, t, ta, fsh, flo, ug, ch, ce, fh, evap, hfsenow, & + hfradow, hflatow, hftotow, hflwrdout + end subroutine + + subroutine flooding(ithermp, h, hsn) + USE MOD_ICE + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) h, hsn + end subroutine + end interface +end module +! +! +!_______________________________________________________________________________ +subroutine cut_off(ice, partit, mesh) + use o_param + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_ICE + use g_config, only: use_cavity + implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice + integer :: n + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice, m_ice, m_snow +#if defined (__oifs) || defined (__ifsinterface) + real(kind=WP), dimension(:), pointer :: ice_temp #endif /* (__oifs) */ -end where - -where(m_ice<0.1e-8_WP) - m_ice=0.0_WP -#if defined (__oifs) - m_snow=0.0_WP - a_ice=0.0_WP - ice_temp=273.15_WP +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) +#if defined (__oifs) || defined (__ifsinterface) + ice_temp => ice%data(4)%values(:) #endif /* (__oifs) */ -end where -#if defined (__oifs) -where(ice_temp>273.15_WP) - ice_temp=273.15_WP -end where + !___________________________________________________________________________ + ! lower cutoff: a_ice +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n) +DO n=1, myDim_nod2D+eDim_nod2D + if (a_ice(n) > 1.0_WP) a_ice(n)=1.0_WP + ! upper cutoff: a_ice + if (a_ice(n) < .1e-8_WP) then + a_ice(n)=0.0_WP +#if defined (__oifs) || defined (__ifsinterface) + m_ice(n) =0.0_WP + m_snow(n) =0.0_WP + ice_temp(n)=273.15_WP #endif /* (__oifs) */ - -#if defined (__oifs) -where(ice_temp < 173.15_WP .and. a_ice >= 0.1e-8_WP) - ice_temp=271.35_WP -end where + end if + !___________________________________________________________________________ + ! lower cutoff: m_ice + if (m_ice(n) < .1e-8_WP) then + m_ice(n)=0.0_WP +#if defined (__oifs) || defined (__ifsinterface) + m_snow(n) =0.0_WP + a_ice(n) =0.0_WP + ice_temp(n)=273.15_WP +#endif /* (__oifs) */ + end if + + !___________________________________________________________________________ +#if defined (__oifs) || defined (__ifsinterface) + if (ice_temp(n) > 273.15_WP) ice_temp(n)=273.15_WP #endif /* (__oifs) */ +#if defined (__oifs) || defined (__ifsinterface) + if (ice_temp(n) < 173.15_WP .and. 
a_ice(n) >= 0.1e-8_WP) ice_temp(n)=271.35_WP +#endif /* (__oifs) */ +END DO +!$OMP END PARALLEL DO end subroutine cut_off -#if !defined (__oasis) -!=================================================================== + +#if !defined (__oasis) && !defined (__ifsinterface) +!_______________________________________________________________________________ ! Sea-ice thermodynamics routines ! ! Coded by N. Yakovlev and S. Danilov. @@ -49,9 +140,8 @@ end subroutine cut_off ! by Ralph Timmermann. ! Adjusted for general forcing data and NlFs option, cleaned up, bug fixing, ! by Qiang Wang, 13.01.2009 -!---------------------------------------------------------------------------- - -subroutine thermodynamics(mesh) +!_______________________________________________________________________________ +subroutine thermodynamics(ice, partit, mesh) ! ! For every surface node, this subroutine extracts the information ! needed for computation of thermodydnamics, calls the relevant @@ -59,573 +149,671 @@ subroutine thermodynamics(mesh) ! variables. !------------------------------------------------------------------------ - use o_param - use mod_mesh - use i_therm_param - use i_param - use i_arrays - use g_config - use g_forcing_param - use g_forcing_arrays - use g_parsup - use g_comm_auto - use g_sbf, only: l_snow - implicit none - real(kind=WP) :: h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,rsf,evap_in - real(kind=WP) :: ug,ustar,T_oc,S_oc,h_ml,t,ch,ce,ch_i,ce_i,fw,ehf,evap - real(kind=WP) :: ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout, subli - real(kind=WP) :: lat - integer :: i, j, elem - real(kind=WP), allocatable :: ustar_aux(:) - real(kind=WP) lid_clo - - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - rsss=ref_sss - - ! u_ice and v_ice are at nodes - ! u_w, v_w are at nodes (interpolated from elements) - ! u_wind and v_wind are always at nodes - ! ================ - ! Friction velocity - ! 
================ - allocate(ustar_aux(myDim_nod2D+eDim_nod2D)) - ustar_aux=0.0_WP - DO i=1, myDim_nod2D - ustar=0.0_WP - if(ulevels_nod2d(i)>1) cycle - ustar=((u_ice(i)-u_w(i))**2+ & - (v_ice(i)-v_w(i))**2) - ustar_aux(i)=sqrt(ustar*Cd_oce_ice) - END DO - call exchange_nod(ustar_aux) !TODO Why do we need it? - ! ================ - ! end: friction velocity - ! ================ - - do i=1, myDim_nod2d+eDim_nod2D - !__________________________________________________________________________ - ! if there is a cavity no sea ice thermodynamics is apllied - if(ulevels_nod2d(i)>1) cycle - - !__________________________________________________________________________ - h = m_ice(i) - hsn = m_snow(i) - A = a_ice(i) - fsh = shortwave(i) - flo = longwave(i) - Ta = Tair(i) - qa = shum(i) - if (.not. l_snow) then - if (Ta>=0.0_WP) then - rain=prec_rain(i) - snow=0.0_WP + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_param + use g_config + use g_forcing_param + use g_forcing_arrays + use g_comm_auto + use g_sbf, only: l_snow + use ice_therm_interface + implicit none + type(t_ice) , intent(inout), target :: ice + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + !_____________________________________________________________________________ + integer :: i, j, elem + real(kind=WP) :: h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,rsf,evap_in + real(kind=WP) :: ug,ustar,T_oc,S_oc,h_ml,t,ch,ce,ch_i,ce_i,fw,ehf,evap + real(kind=WP) :: ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout, subli + real(kind=WP) :: lid_clo + real(kind=WP) :: lat + + !_____________________________________________________________________________ + ! 
pointer on necessary derived types + integer , pointer :: myDim_nod2D, eDim_nod2D + integer , dimension(:) , pointer :: ulevels_nod2D + real(kind=WP), dimension(:,:), pointer :: geo_coord_nod2D + real(kind=WP), dimension(:) , pointer :: u_ice, v_ice + real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: a_ice_old, m_ice_old, m_snow_old + real(kind=WP), dimension(:) , pointer :: thdgr, thdgrsn, thdgr_old, t_skin, ustar_aux + real(kind=WP), dimension(:) , pointer :: S_oc_array, T_oc_array, u_w, v_w + real(kind=WP), dimension(:) , pointer :: fresh_wa_flux, net_heat_flux + myDim_nod2d => partit%myDim_nod2D + eDim_nod2D => partit%eDim_nod2D + ulevels_nod2D (1 :myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D(:) + geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D(:,:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) + thdgr => ice%thermo%thdgr + thdgrsn => ice%thermo%thdgrsn + thdgr_old => ice%thermo%thdgr_old + t_skin => ice%thermo%t_skin + ustar_aux => ice%thermo%ustar + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + T_oc_array => ice%srfoce_temp(:) + S_oc_array => ice%srfoce_salt(:) + net_heat_flux => ice%flx_h(:) + fresh_wa_flux => ice%flx_fw(:) + + !___________________________________________________________________________ + rsss=ref_sss + + ! u_ice and v_ice are at nodes + ! u_w, v_w are at nodes (interpolated from elements) + ! u_wind and v_wind are always at nodes + !___________________________________________________________________________ + ! 
Friction velocity
+!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(i, j, elem, h, hsn, A, fsh, flo, Ta, qa, rain, snow, runo, rsss, rsf, evap_in, ug, ustar, T_oc, S_oc, &
+!$OMP h_ml, t, ch, ce, ch_i, ce_i, fw, ehf, evap, ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout, &
+!$OMP subli, lid_clo, lat)
+!$OMP DO
+    do i=1, myDim_nod2D
+        ustar=0.0_WP
+        if(ulevels_nod2d(i)>1) cycle
+        ustar=((u_ice(i)-u_w(i))**2 + (v_ice(i)-v_w(i))**2)
+        ustar_aux(i)=sqrt(ustar*ice%cd_oce_ice)
+    end do
+!$OMP END DO
+!$OMP MASTER
+    call exchange_nod(ustar_aux, partit)
+!$OMP END MASTER
+!$OMP BARRIER
+
+    !___________________________________________________________________________
+!$OMP DO
+    do i=1, myDim_nod2d+eDim_nod2D
+        !_______________________________________________________________________
+        ! if there is a cavity no sea ice thermodynamics is applied
+        if(ulevels_nod2d(i)>1) cycle
+
+        !_______________________________________________________________________
+        ! prepare inputs for ice thermodynamics step
+        h     = m_ice(i)
+        hsn   = m_snow(i)
+        A     = a_ice(i)
+        fsh   = shortwave(i)
+        flo   = longwave(i)
+        Ta    = Tair(i)
+        qa    = shum(i)
+        if (.not. l_snow) then
+            if (Ta>=0.0_WP) then
+                rain=prec_rain(i)
+                snow=0.0_WP
+            else
+                rain=0.0_WP
+                snow=prec_rain(i)
+            endif
+            evap_in=evaporation(i) !evap_in: positive up
+        else
+            rain = prec_rain(i)
+            snow = prec_snow(i)
+            evap_in=0.0_WP
+        end if
+        runo = runoff(i)
+        ug = sqrt(u_wind(i)**2+v_wind(i)**2)
+        ustar = ustar_aux(i)
+        T_oc = T_oc_array(i)
+        S_oc = S_oc_array(i)
+        if(ref_sss_local) rsss = S_oc
+        t = t_skin(i)
+        ch = Ch_atm_oce_arr(i)
+        ce = Ce_atm_oce_arr(i)
+        ch_i = Ch_atm_ice
+        ce_i = Ce_atm_ice
+        h_ml = 2.5_WP ! 10.0 or 30. 
used previously + fw = 0.0_WP + ehf = 0.0_WP + lid_Clo=ice%thermo%h0 + if (geo_coord_nod2D(2, i)>0) then !TODO 2 separate pars for each hemisphere + lid_clo=0.5_WP else - rain=0.0_WP - snow=prec_rain(i) + lid_clo=0.5_WP endif - evap_in=evaporation(i) !evap_in: positive up - else - rain = prec_rain(i) - snow = prec_snow(i) - evap_in=0.0_WP - end if - runo = runoff(i) - ug = sqrt(u_wind(i)**2+v_wind(i)**2) - ustar = ustar_aux(i) - T_oc = T_oc_array(i) - S_oc = S_oc_array(i) - if(ref_sss_local) rsss = S_oc - t = t_skin(i) - ch = Ch_atm_oce_arr(i) - ce = Ce_atm_oce_arr(i) - ch_i = Ch_atm_ice - ce_i = Ce_atm_ice -!!PS h_ml = 10.0_WP ! 10.0 or 30. used previously -!!PS h_ml = 5.0_WP ! 10.0 or 30. used previously - h_ml = 2.5_WP ! 10.0 or 30. used previously -!!PS h_ml = 1.25_WP ! 10.0 or 30. used previously - fw = 0.0_WP - ehf = 0.0_WP - lid_Clo=h0 - if (geo_coord_nod2D(2,i)>0) then !TODO 2 separate pars for each hemisphere - lid_clo=0.5_WP - else - lid_clo=0.5_WP - endif - - call therm_ice(h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & - ug,ustar,T_oc,S_oc,h_ml,t,ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & - rsf, ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) - - m_ice_old(i) = m_ice(i) !PS - m_snow_old(i) = m_snow(i) !PS - a_ice_old(i) = a_ice(i) !PS - thdgr_old(i) = thdgr(i) !PS - - m_ice(i) = h - m_snow(i) = hsn - a_ice(i) = A - t_skin(i) = t - fresh_wa_flux(i) = fw !positive down - net_heat_flux(i) = ehf !positive down - evaporation(i) = evap !negative up - ice_sublimation(i)= subli - - thdgr(i) = ithdgr - thdgrsn(i) = ithdgrsn - flice(i) = iflice - olat_heat(i) = hflatow - osen_heat(i) = hfsenow - olwout(i) = hflwrdout - - ! real salt flux due to salinity that is contained in the sea ice 4-5 psu - real_salt_flux(i)= rsf !PS - - end do - deallocate(ustar_aux) + + !_______________________________________________________________________ + ! 
do ice thermodynamics + call therm_ice(ice%thermo,h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & + ug,ustar,T_oc,S_oc,h_ml,t,ice%ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & + rsf, ithdgr, ithdgrsn, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) + + !_______________________________________________________________________ + ! write ice thermodyn. results into arrays + ! backup of old values + m_ice_old(i) = m_ice(i) !PS + m_snow_old(i) = m_snow(i) !PS + a_ice_old(i) = a_ice(i) !PS + thdgr_old(i) = thdgr(i) !PS + + ! new values + m_ice(i) = h + m_snow(i) = hsn + a_ice(i) = A + + t_skin(i) = t + fresh_wa_flux(i) = fw !positive down + net_heat_flux(i) = ehf !positive down + evaporation(i) = evap !negative up + ice_sublimation(i)= subli + + thdgr(i) = ithdgr + thdgrsn(i) = ithdgrsn + flice(i) = iflice + olat_heat(i) = hflatow + osen_heat(i) = hfsenow + olwout(i) = hflwrdout + + ! real salt flux due to salinity that is contained in the sea ice 4-5 psu + real_salt_flux(i) = rsf !PS + + ! if snow file is not given snow computed from prec_rain --> but prec_snow + ! array needs to be filled --> so that the freshwater balancing adds up + if (.not. l_snow) then + prec_rain(i) = rain + prec_snow(i) = snow + end if + end do +!$OMP END DO +!$OMP END PARALLEL end subroutine thermodynamics ! -!=================================================================== ! -subroutine therm_ice(h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss, & - ug,ustar,T_oc,S_oc,H_ML,t,ice_dt,ch,ce,ch_i,ce_i,evap_in,fw,ehf,evap, & - rsf, dhgrowth, dhsngrowth, iflice, hflatow, hfsenow, hflwrdout,lid_clo,subli) - ! Ice Thermodynamic growth model - ! - ! Input parameters: - !------------------ - ! h - ice mass [m] - ! hsn - snow mass [m] - ! A - ice compactness - ! fsh - shortwave radiation - ! flo - longwave radiation - ! Ta - air temperature - ! qa - specific humidity - ! rain - precipitation rain - ! snow - precipitation snow - ! runo - runoff - ! ug - wind speed - ! ustar - friction velocity - ! 
T_oc, S_oc - ocean temperature and salinity beneath the ice (mixed layer) - ! H_ML - mixed layer depth - should be specified. - ! t - temperature of snow/ice top surface - ! ice_dt - time step [s] - ! ch - transfer coefficient for sensible heat (for open ocean) - ! ce - transfer coefficient for evaporation (for open ocean) - ! ch_i - transfer coefficient for sensible heat (for ice) - ! ce_i - transfer coefficient for evaporation (for ice) - ! lid_clo - lid closing parameter - ! Output parameters: - !------------------- - ! h - ice mass - ! hsn - snow mass - ! A - ice compactness - ! t - temperature of snow/ice top surface - ! fw - freshwater flux due to ice melting [m water/ice_dt] - ! ehf - net heat flux at the ocean surface [W/m2] !RTnew - - use i_therm_param - use g_forcing_param, only: use_virt_salt - - use o_param - use g_parsup - implicit none - - integer k - real(kind=WP) h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,evap_in - real(kind=WP) ug,ustar,T_oc,S_oc,H_ML,t,ice_dt,ch,ce,ch_i,ce_i,fw,ehf - real(kind=WP) dhgrowth,dhsngrowth,ahf,prec,subli,subli_i,rsf - real(kind=WP) rhow,show,rhice,shice,sh,thick,thact,lat - real(kind=WP) rh,rA,qhst,sn,hsntmp,o2ihf,evap - real(kind=WP) iflice,hflatow,hfsenow,hflwrdout - real(kind=WP), external :: TFrez ! Sea water freeze temperature. - real(kind=WP) lid_clo - ! Store ice thickness at start of growth routine - dhgrowth=h - - ! determine h(i,j)/a(i,j) = actual ice thickness. - ! if snow layer is present, add hsn weighted with quotient - ! of conductivities of ice and snow, according to 0-layer approach - ! of Semtner (1976). - ! thickness at the ice covered part - thick=hsn*(con/consn)/max(A,Armin) ! Effective snow thickness - thick=thick+h/max(A,Armin) ! Effective total snow-ice thickness - - ! 
Growth rate for ice in open ocean - rhow=0.0_WP - evap=0.0_WP - call obudget(qa,fsh,flo,T_oc,ug,ta,ch,ce,rhow,evap,hflatow,hfsenow,hflwrdout) - hflatow=hflatow*(1.0_WP-A) - hfsenow=hfsenow*(1.0_WP-A) - hflwrdout=hflwrdout*(1.0_WP-A) - - ! add heat loss at open ocean due to melting snow fall - !rhow=rhow+snow*1000.0/rhoice !qiang - ! ice_dt and (1-A) will be multiplied afterwards - - ! growth rate of ice in ice covered part - ! following Hibler 1984 - ! assuming ice thickness has an euqal, 7-level distribution from zero to two times h - rhice=0.0_WP - subli=0.0_WP - if (thick.gt.hmin) then - do k=1,iclasses - thact = real((2*k-1),WP)*thick/real(iclasses,WP) ! Thicknesses of actual ice class - call budget(thact,hsn,t,Ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,shice,subli_i) - !Thick ice K-class growth rate - rhice=rhice+shice ! Add to average heat flux - subli=subli+subli_i - end do - rhice=rhice/real(iclasses,WP) ! Add to average heat flux - subli=subli/real(iclasses,WP) - end if - - ! Convert growth rates [m ice/sec] into growth per time step DT. - rhow=rhow*ice_dt - rhice=rhice*ice_dt - - ! Multiply ice growth of open water and ice - ! with the corresponding areal fractions of grid cell - show =rhow*(1.0_WP-A) - shice=rhice*A - sh =show+shice - - ! Store atmospheric heat flux, average over grid cell [W/m**2] - ahf=-cl*sh/ice_dt - - ! precipitation (into the ocean) - prec=rain+runo+snow*(1.0_WP-A) ! m water/s - - ! snow fall above ice - hsn=hsn+snow*ice_dt*A*1000.0_WP*inv_rhosno ! Add snow fall to temporary snow thickness !!! - dhsngrowth=hsn ! Store snow thickness after snow fall - - evap=evap*(1.0_WP-A) ! m water/s - subli=subli*A - - ! If there is atmospheric melting, first melt any snow that is present. - ! Atmospheric heat flux available for melting - ! sh = MINUS atm. heat flux / specific latent heat of sea ice - ! Note: (sh<0) for melting, (sh>0) for freezing - hsntmp= -min(sh,0.0_WP)*rhoice*inv_rhosno - - ! 
hsntmp is the decrease in snow thickness due to atmospheric melting - ! [m/DT]. Do not melt more snow than available - hsntmp=min(hsntmp,hsn) - hsn=hsn-hsntmp ! Update snow thickness after atmospheric snow melt - - ! Negative atmospheric heat flux left after melting of snow - ! Note: (sh<0) and (hsntmp>0) for melting conditions - ! hsntmp=0 for non-snow-melting conditions - rh=sh+hsntmp*rhosno/rhoice - h=max(h,0.0_WP) - - ! Compute heat content qhst of mixed layer - sea ice system - ! - ! Total heat content is the sum of - ! h ice thickness after calculation of dynamic effects - ! 178418rh change in ice thickness due to atmospheric forcing - ! and heat available in mixed layer, with - ! T_oc temperature of ocean surface layer - ! Tfrez freezing point of sea water - ! H_ML thickness of uppermost layer - ! - !RT: - ! There are three possibilities to do this. - ! 1.: Assume an instantaneous adjustment of mixed layer heat content. - ! Any heat available is then instantaneously used to melt ice. - ! (so-called ice-bath approach) - ! This is what used to be used in the Lemke sea ice-mixed layer model. - ! rh=rh-(T_oc-TFrez(S_oc))*H_ML*cc/cl - ! qhst=h+rh - ! - ! 2.: Parameterize the ocean-to-ice heat flux (o2ihf) - ! as a function of temperature difference. For a first step - ! we can assume a constant exchange coefficient gamma_t: - ! o2ihf= (T_oc-TFrez(S_oc))*gamma_t*cc*A & - ! +(T_oc-Tfrez(S_oc))*H_ML/ice_dt*cc*(1.0-A) ! [W/m2] - ! rh=rh-o2ihf*ice_dt/cl - ! qhst=h+rh ! [m] - ! - ! 3. Parameterize the ocean-to-ice heat flux (o2ihf) - ! as a function of temperature difference and the - ! friction velocity: - o2ihf= (T_oc-TFrez(S_oc))*0.006_WP*ustar*cc*A & - +(T_oc-Tfrez(S_oc))*H_ML/ice_dt*cc*(1.0_WP-A) ! [W/m2] - rh=rh-o2ihf*ice_dt/cl - qhst=h+rh ! [m] - - ! Melt snow if there is any ML heat content left (qhst<0). - ! This may be the case if advection moves ice (with snow) to regions - ! with a warm mixed layer. - sn=hsn+min(qhst,0.0_WP)*rhoice*inv_rhosno - - ! 
New temporary snow thickness must not be negative: - sn=max(sn,0.0_WP) - - ! Update snow and ice depth - hsn=sn - h=max(qhst,0.0_WP) - if (h.lt.1E-6_WP) h=0._WP ! Avoid very small ice thicknesses - - ! heat and fresh water fluxes - dhgrowth=h-dhgrowth ! Change in ice thickness due to thermodynamic effects - dhsngrowth=hsn-dhsngrowth ! Change in snow thickness due to thermodynamic melting - - ! (without snow fall). This is a negative value (MINUS snow melt) - - dhgrowth=dhgrowth/ice_dt ! Conversion: 'per time step' -> 'per second' - dhsngrowth=dhsngrowth/ice_dt ! Conversion: 'per time step' -> 'per second' - ! (radiation+turbulent) + freezing(-melting) sea-ice&snow - - ehf = ahf + cl*(dhgrowth+(rhosno/rhoice)*dhsngrowth) +!_______________________________________________________________________________ +subroutine therm_ice(ithermp, h, hsn, A, fsh, flo, Ta, qa, rain, snow, runo, rsss, & + ug, ustar, T_oc, S_oc, H_ML, t, ice_dt, ch, ce, ch_i, ce_i, & + evap_in, fw, ehf, evap, rsf, dhgrowth, dhsngrowth, iflice, & + hflatow, hfsenow, hflwrdout, lid_clo, subli) + ! Ice Thermodynamic growth model + ! + ! Input parameters: + !------------------ + ! h - ice mass [m] + ! hsn - snow mass [m] + ! A - ice compactness + ! fsh - shortwave radiation + ! flo - longwave radiation + ! Ta - air temperature + ! qa - specific humidity + ! rain - precipitation rain + ! snow - precipitation snow + ! runo - runoff + ! ug - wind speed + ! ustar - friction velocity + ! T_oc, S_oc - ocean temperature and salinity beneath the ice (mixed layer) + ! H_ML - mixed layer depth - should be specified. + ! t - temperature of snow/ice top surface + ! ice_dt - time step [s] + ! ch - transfer coefficient for sensible heat (for open ocean) + ! ce - transfer coefficient for evaporation (for open ocean) + ! ch_i - transfer coefficient for sensible heat (for ice) + ! ce_i - transfer coefficient for evaporation (for ice) + ! lid_clo - lid closing parameter + ! Output parameters: + !------------------- + ! 
h - ice mass + ! hsn - snow mass + ! A - ice compactness + ! t - temperature of snow/ice top surface + ! fw - freshwater flux due to ice melting [m water/ice_dt] + ! ehf - net heat flux at the ocean surface [W/m2] !RTnew + + USE MOD_ICE + use g_forcing_param, only: use_virt_salt + use o_param + use ice_budget_interfaces + implicit none + type(t_ice_thermo), intent(in), target :: ithermp + integer k + real(kind=WP) h,hsn,A,fsh,flo,Ta,qa,rain,snow,runo,rsss,evap_in + real(kind=WP) ug,ustar,T_oc,S_oc,H_ML,t,ice_dt,ch,ce,ch_i,ce_i,fw,ehf + real(kind=WP) dhgrowth,dhsngrowth,ahf,prec,subli,subli_i,rsf + real(kind=WP) rhow,show,rhice,shice,sh,thick,thact,lat + real(kind=WP) rh,rA,qhst,sn,hsntmp,o2ihf,evap + real(kind=WP) iflice,hflatow,hfsenow,hflwrdout + real(kind=WP), external :: TFrez ! Sea water freeze temperature. + real(kind=WP) lid_clo + !___________________________________________________________________________ + real(kind=WP), pointer :: hmin, Sice, Armin, cc, cl, con, consn, rhosno, rhoice, inv_rhowat, inv_rhosno + integer , pointer :: iclasses + hmin => ithermp%hmin + Armin => ithermp%Armin + Sice => ithermp%Sice + cc => ithermp%cc + cl => ithermp%cl + con => ithermp%con + consn => ithermp%consn + iclasses => ithermp%iclasses + rhosno => ithermp%rhosno + rhoice => ithermp%rhoice + inv_rhowat => ithermp%inv_rhowat + inv_rhosno => ithermp%inv_rhosno + + !___________________________________________________________________________ + ! Store ice thickness at start of growth routine + dhgrowth=h + + ! determine h(i,j)/a(i,j) = actual ice thickness. + ! if snow layer is present, add hsn weighted with quotient + ! of conductivities of ice and snow, according to 0-layer approach + ! of Semtner (1976). + ! thickness at the ice covered part + thick=hsn*(con/consn)/max(A,Armin) ! Effective snow thickness + thick=thick+h/max(A,Armin) ! Effective total snow-ice thickness + + ! 
Growth rate for ice in open ocean
+    rhow=0.0_WP
+    evap=0.0_WP
+    call obudget(ithermp, qa,fsh,flo,T_oc,ug,ta,ch,ce,rhow,evap,hflatow,hfsenow,hflwrdout)
+    hflatow=hflatow*(1.0_WP-A)
+    hfsenow=hfsenow*(1.0_WP-A)
+    hflwrdout=hflwrdout*(1.0_WP-A)
+
+    ! add heat loss at open ocean due to melting snow fall
+    !rhow=rhow+snow*1000.0/rhoice !qiang
+    ! ice_dt and (1-A) will be multiplied afterwards
+
+    ! growth rate of ice in ice covered part
+    ! following Hibler 1984
+    ! assuming ice thickness has an equal, 7-level distribution from zero to two times h
+    rhice=0.0_WP
+    subli=0.0_WP
+    if (thick.gt.hmin) then
+        do k=1,iclasses
+            thact = real((2*k-1),WP)*thick/real(iclasses,WP) ! Thicknesses of actual ice class
+            call budget(ithermp, thact, hsn,t,Ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,shice,subli_i)
+            !Thick ice K-class growth rate
+            rhice=rhice+shice ! Add to average heat flux
+            subli=subli+subli_i
+        end do
+        rhice=rhice/real(iclasses,WP) ! Add to average heat flux
+        subli=subli/real(iclasses,WP)
+    end if
+
+    ! Convert growth rates [m ice/sec] into growth per time step DT.
+    rhow=rhow*ice_dt
+    rhice=rhice*ice_dt
+
+    ! Multiply ice growth of open water and ice
+    ! with the corresponding areal fractions of grid cell
+    show =rhow*(1.0_WP-A)
+    shice=rhice*A
+    sh =show+shice
+
+    ! Store atmospheric heat flux, average over grid cell [W/m**2]
+    ahf=-cl*sh/ice_dt
+
+    ! precipitation (into the ocean)
+    prec=rain+runo+snow*(1.0_WP-A) ! m water/s
+
+    ! snow fall above ice
+    hsn=hsn+snow*ice_dt*A*1000.0_WP*inv_rhosno ! Add snow fall to temporary snow thickness !!!
+    dhsngrowth=hsn ! Store snow thickness after snow fall
+
+    evap=evap*(1.0_WP-A) ! m water/s
+    subli=subli*A
+
+    ! If there is atmospheric melting, first melt any snow that is present.
+    ! Atmospheric heat flux available for melting
+    ! sh = MINUS atm. heat flux / specific latent heat of sea ice
+    ! Note: (sh<0) for melting, (sh>0) for freezing
+    hsntmp= -min(sh,0.0_WP)*rhoice*inv_rhosno
+
+    ! 
hsntmp is the decrease in snow thickness due to atmospheric melting + ! [m/DT]. Do not melt more snow than available + hsntmp=min(hsntmp,hsn) + hsn=hsn-hsntmp ! Update snow thickness after atmospheric snow melt + + ! Negative atmospheric heat flux left after melting of snow + ! Note: (sh<0) and (hsntmp>0) for melting conditions + ! hsntmp=0 for non-snow-melting conditions + rh=sh+hsntmp*rhosno/rhoice + h=max(h,0.0_WP) + + ! Compute heat content qhst of mixed layer - sea ice system + ! + ! Total heat content is the sum of + ! h ice thickness after calculation of dynamic effects + ! 178418rh change in ice thickness due to atmospheric forcing + ! and heat available in mixed layer, with + ! T_oc temperature of ocean surface layer + ! Tfrez freezing point of sea water + ! H_ML thickness of uppermost layer + ! + !RT: + ! There are three possibilities to do this. + ! 1.: Assume an instantaneous adjustment of mixed layer heat content. + ! Any heat available is then instantaneously used to melt ice. + ! (so-called ice-bath approach) + ! This is what used to be used in the Lemke sea ice-mixed layer model. + ! rh=rh-(T_oc-TFrez(S_oc))*H_ML*cc/cl + ! qhst=h+rh + ! + ! 2.: Parameterize the ocean-to-ice heat flux (o2ihf) + ! as a function of temperature difference. For a first step + ! we can assume a constant exchange coefficient gamma_t: + ! o2ihf= (T_oc-TFrez(S_oc))*gamma_t*cc*A & + ! +(T_oc-Tfrez(S_oc))*H_ML/ice_dt*cc*(1.0-A) ! [W/m2] + ! rh=rh-o2ihf*ice_dt/cl + ! qhst=h+rh ! [m] + ! + ! 3. Parameterize the ocean-to-ice heat flux (o2ihf) + ! as a function of temperature difference and the + ! friction velocity: + o2ihf= (T_oc-TFrez(S_oc))*0.006_WP*ustar*cc*A & + +(T_oc-Tfrez(S_oc))*H_ML/ice_dt*cc*(1.0_WP-A) ! [W/m2] + rh=rh-o2ihf*ice_dt/cl + qhst=h+rh ! [m] + + ! Melt snow if there is any ML heat content left (qhst<0). + ! This may be the case if advection moves ice (with snow) to regions + ! with a warm mixed layer. + sn=hsn+min(qhst,0.0_WP)*rhoice*inv_rhosno + + ! 
New temporary snow thickness must not be negative: + sn=max(sn,0.0_WP) + + ! Update snow and ice depth + hsn=sn + h=max(qhst,0.0_WP) + if (h.lt.1E-6_WP) h=0._WP ! Avoid very small ice thicknesses + + ! heat and fresh water fluxes + dhgrowth=h-dhgrowth ! Change in ice thickness due to thermodynamic effects + dhsngrowth=hsn-dhsngrowth ! Change in snow thickness due to thermodynamic melting + + ! (without snow fall). This is a negative value (MINUS snow melt) + + dhgrowth=dhgrowth/ice_dt ! Conversion: 'per time step' -> 'per second' + dhsngrowth=dhsngrowth/ice_dt ! Conversion: 'per time step' -> 'per second' + ! (radiation+turbulent) + freezing(-melting) sea-ice&snow + + ehf = ahf + cl*(dhgrowth+(rhosno/rhoice)*dhsngrowth) + + ! (prec+runoff)+evap - freezing(+melting) ice&snow + if (.not. use_virt_salt) then + fw= prec+evap - dhgrowth*rhoice*inv_rhowat - dhsngrowth*rhosno*inv_rhowat + rsf= -dhgrowth*rhoice*inv_rhowat*Sice + else + fw= prec+evap - dhgrowth*rhoice*inv_rhowat*(rsss-Sice)/rsss - dhsngrowth*rhosno*inv_rhowat + end if + + ! Changes in compactnesses (equation 16 of Hibler 1979) + rh=-min(h,-rh) ! Make sure we do not try to melt more ice than is available + rA= rhow - o2ihf*ice_dt/cl !Qiang: it was -(T_oc-TFrez(S_oc))*H_ML*cc/cl, changed in June 2010 + !rA= rhow - (T_oc-TFrez(S_oc))*H_ML*cc/cl*(1.0-A) + A=A + 0.5_WP*min(rh,0.0_WP)*A/max(h,hmin) + max(rA,0.0_WP)*(1._WP-A)/lid_clo !/h0 + !meaning: melting freezing + + A=min(A,h*1.e6_WP) ! A -> 0 for h -> 0 + A=min(max(A,0.0_WP),1._WP) ! A >= 0, A <= 1 + + ! Flooding (snow to ice conversion) + iflice=h + call flooding(ithermp, h, hsn) + iflice=(h-iflice)/ice_dt + + ! to maintain salt conservation for the current model version + !(a way to avoid producing net salt from snow-type-ice) + if (.not. use_virt_salt) then + rsf=rsf-iflice*rhoice*inv_rhowat*Sice + else + fw=fw+iflice*rhoice*inv_rhowat*Sice/rsss + end if + + evap=evap+subli - ! (prec+runoff)+evap - freezing(+melting) ice&snow - if (.not. 
use_virt_salt) then - fw= prec+evap - dhgrowth*rhoice*inv_rhowat - dhsngrowth*rhosno*inv_rhowat - rsf= -dhgrowth*rhoice*inv_rhowat*Sice - else - fw= prec+evap - dhgrowth*rhoice*inv_rhowat*(rsss-Sice)/rsss - dhsngrowth*rhosno*inv_rhowat - end if - - ! Changes in compactnesses (equation 16 of Hibler 1979) - rh=-min(h,-rh) ! Make sure we do not try to melt more ice than is available - rA= rhow - o2ihf*ice_dt/cl !Qiang: it was -(T_oc-TFrez(S_oc))*H_ML*cc/cl, changed in June 2010 - !rA= rhow - (T_oc-TFrez(S_oc))*H_ML*cc/cl*(1.0-A) - A=A + 0.5_WP*min(rh,0.0_WP)*A/max(h,hmin) + max(rA,0.0_WP)*(1._WP-A)/lid_clo !/h0 - !meaning: melting freezing - - A=min(A,h*1.e6_WP) ! A -> 0 for h -> 0 - A=min(max(A,0.0_WP),1._WP) ! A >= 0, A <= 1 - - ! Flooding (snow to ice conversion) - iflice=h - call flooding(h,hsn) - iflice=(h-iflice)/ice_dt - - ! to maintain salt conservation for the current model version - !(a way to avoid producing net salt from snow-type-ice) - if (.not. use_virt_salt) then - rsf=rsf-iflice*rhoice*inv_rhowat*Sice - else - fw=fw+iflice*rhoice*inv_rhowat*Sice/rsss - end if - - evap=evap+subli - end subroutine therm_ice ! -!===================================================================================== ! -subroutine budget (hice,hsn,t,ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,fh,subli) - ! Thick ice growth rate [m ice/sec] - ! - ! INPUT: - ! hice - actual ice thickness [m] - ! hsn - snow thickness, used for albedo parameterization [m] - ! t - temperature of snow/ice surface [C] - ! ta - air temperature [C] - ! qa - specific humidity [Kg/Kg] - ! fsh - shortwave radiation [W/m**2] - ! flo - longwave radiation [W/m**2] - ! ug - wind speed [m/sec] - ! S_oc - ocean salinity for the temperature of the ice base calculation [ppt] - ! ch_i - transfer coefficient for sensible heat (for ice) - ! ce_i - transfer coefficient for evaporation (for ice) - ! - ! OUTPUT: fh - growth rate - ! - ! qiang: The formular for saturated humidity was modified according to Large/Yeager2004 - ! 
to allow direct comparison with the CORE results (Griffies et al. 2009). The new - ! formular does not require sea level pressure. - ! A similar change was also made for the obudget routine. - ! It was found through experiments that the results are quite similar to that from the - ! original code, and the simulated ice volume is only slightly larger after modification. - - use i_therm_param - use o_param, only: WP - implicit none - - integer iter, imax ! Number of iterations - real(kind=WP) hice,hsn,t,ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,fh - real(kind=WP) hfsen,hfrad,hflat,hftot,subli - real(kind=WP) alb ! Albedo of sea ice - real(kind=WP) q1, q2 ! coefficients for saturated specific humidity - real(kind=WP) A1,A2,A3,B,C, d1, d2, d3 - real(kind=WP), external :: TFrez - -!!PS data q1 /11637800.0/, q2 /-5897.8/ -!!PS data imax /5/ - - q1 = 11637800.0_WP - q2 = -5897.8_WP - imax = 5 - - ! set albedo - ! ice and snow, freezing and melting conditions are distinguished. - if (t<0.0_WP) then ! freezing condition - if (hsn.gt.0.0_WP) then ! snow cover present - alb=albsn - else ! no snow cover - alb=albi - endif - else ! melting condition - if (hsn.gt.0.0_WP) then ! snow cover present - alb=albsnm - else ! no snow cover - alb=albim - endif - endif - - d1=rhoair*cpair*Ch_i - d2=rhoair*Ce_i - d3=d2*clhi - - ! total incoming atmospheric heat flux - A1=(1.0_WP-alb)*fsh + flo + d1*ug*ta + d3*ug*qa ! in LY2004 emiss is multiplied wiht flo - ! NEWTON-RHAPSON TO GET TEMPERATURE AT THE TOP OF THE ICE LAYER - - do iter=1,imax - - B=q1*inv_rhoair*exp(q2/(t+tmelt)) ! (saturated) specific humidity over ice - A2=-d1*ug*t-d3*ug*B & - -emiss_ice*boltzmann*((t+tmelt)**4) ! sensible and latent heat,and outward radiation - A3=-d3*ug*B*q2/((t+tmelt)**2) ! gradient coefficient for the latent heat part - C=con/hice ! gradient coefficient for downward heat conductivity - A3=A3+C+d1*ug & ! 
gradient coefficient for sensible heat and radiation - +4.0_WP*emiss_ice*boltzmann*((t+tmelt)**3) - C=C*(TFrez(S_oc)-t) ! downward conductivity term - - t=t+(A1+A2+C)/A3 ! NEW ICE TEMPERATURE AS THE SUM OF ALL COMPONENTS - end do - - t=min(0.0_WP,t) - ! heat fluxes [W/m**2]: - - hfrad= (1.0_WP-alb)*fsh & ! absorbed short wave radiation - +flo & ! long wave radiation coming in ! in LY2004 emiss is multiplied - -emiss_ice*boltzmann*((t+tmelt)**4) ! long wave radiation going out - - hfsen=d1*ug*(ta-t) ! sensible heat - subli=d2*ug*(qa-B) ! sublimation - hflat=clhi*subli ! latent heat - - hftot=hfrad+hfsen+hflat ! total heat - - fh= -hftot/cl ! growth rate [m ice/sec] - ! +: ML gains energy, ice melts - ! -: ML loses energy, ice grows - subli=subli*inv_rhowat ! negative upward - - return +!_______________________________________________________________________________ +subroutine budget (ithermp, hice,hsn,t,ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,fh,subli) + ! Thick ice growth rate [m ice/sec] + ! + ! INPUT: + ! hice - actual ice thickness [m] + ! hsn - snow thickness, used for albedo parameterization [m] + ! t - temperature of snow/ice surface [C] + ! ta - air temperature [C] + ! qa - specific humidity [Kg/Kg] + ! fsh - shortwave radiation [W/m**2] + ! flo - longwave radiation [W/m**2] + ! ug - wind speed [m/sec] + ! S_oc - ocean salinity for the temperature of the ice base calculation [ppt] + ! ch_i - transfer coefficient for sensible heat (for ice) + ! ce_i - transfer coefficient for evaporation (for ice) + ! + ! OUTPUT: fh - growth rate + ! + ! qiang: The formular for saturated humidity was modified according to Large/Yeager2004 + ! to allow direct comparison with the CORE results (Griffies et al. 2009). The new + ! formular does not require sea level pressure. + ! A similar change was also made for the obudget routine. + ! It was found through experiments that the results are quite similar to that from the + ! 
original code, and the simulated ice volume is only slightly larger after modification. + use MOD_ICE + use o_param, only: WP + implicit none + type(t_ice_thermo), intent(in), target :: ithermp + integer iter, imax ! Number of iterations + real(kind=WP) hice,hsn,t,ta,qa,fsh,flo,ug,S_oc,ch_i,ce_i,fh + real(kind=WP) hfsen,hfrad,hflat,hftot,subli + real(kind=WP) alb ! Albedo of sea ice + real(kind=WP) q1, q2 ! coefficients for saturated specific humidity + real(kind=WP) A1,A2,A3,B,C, d1, d2, d3 + real(kind=WP), external :: TFrez + !___________________________________________________________________________ + real(kind=WP), pointer :: boltzmann, emiss_ice, tmelt, cl, clhi, con, cpair, & + inv_rhowat, inv_rhoair, rhoair, albim, albi, albsn, albsnm + boltzmann => ithermp%boltzmann + emiss_ice => ithermp%emiss_ice + tmelt => ithermp%tmelt + cl => ithermp%cl + clhi => ithermp%clhi + con => ithermp%con + cpair => ithermp%cpair + inv_rhowat => ithermp%inv_rhowat + inv_rhoair => ithermp%inv_rhoair + rhoair => ithermp%rhoair + albim => ithermp%albim + albi => ithermp%albi + albsn => ithermp%albsn + albsnm => ithermp%albsnm + + !___________________________________________________________________________ + q1 = 11637800.0_WP + q2 = -5897.8_WP + imax = 5 + + !___________________________________________________________________________ + ! set albedo + ! ice and snow, freezing and melting conditions are distinguished. + if (t<0.0_WP) then ! --> freezing condition + if (hsn.gt.0.0_WP) then ! --> snow cover present + alb=albsn + else ! --> no snow cover + alb=albi + endif + else ! --> melting condition + if (hsn.gt.0.0_WP) then ! --> snow cover present + alb=albsnm + else ! --> no snow cover + alb=albim + endif + endif + + !___________________________________________________________________________ + d1=rhoair*cpair*Ch_i + d2=rhoair*Ce_i + d3=d2*clhi + + ! total incoming atmospheric heat flux + A1=(1.0_WP-alb)*fsh + flo + d1*ug*ta + d3*ug*qa ! 
in LY2004 emiss is multiplied wiht flo + ! NEWTON-RHAPSON TO GET TEMPERATURE AT THE TOP OF THE ICE LAYER + + do iter=1,imax + B=q1*inv_rhoair*exp(q2/(t+tmelt)) ! (saturated) specific humidity over ice + A2=-d1*ug*t-d3*ug*B & + -emiss_ice*boltzmann*((t+tmelt)**4) ! sensible and latent heat,and outward radiation + A3=-d3*ug*B*q2/((t+tmelt)**2) ! gradient coefficient for the latent heat part + C=con/hice ! gradient coefficient for downward heat conductivity + A3=A3+C+d1*ug & ! gradient coefficient for sensible heat and radiation + +4.0_WP*emiss_ice*boltzmann*((t+tmelt)**3) + C=C*(TFrez(S_oc)-t) ! downward conductivity term + + t=t+(A1+A2+C)/A3 ! NEW ICE TEMPERATURE AS THE SUM OF ALL COMPONENTS + end do + t=min(0.0_WP,t) + + !___________________________________________________________________________ + ! heat fluxes [W/m**2]: + hfrad= (1.0_WP-alb)*fsh & ! absorbed short wave radiation + +flo & ! long wave radiation coming in ! in LY2004 emiss is multiplied + -emiss_ice*boltzmann*((t+tmelt)**4) ! long wave radiation going out + + hfsen=d1*ug*(ta-t) ! sensible heat + subli=d2*ug*(qa-B) ! sublimation + hflat=clhi*subli ! latent heat + + hftot=hfrad+hfsen+hflat ! total heat + + fh= -hftot/cl ! growth rate [m ice/sec] + ! +: ML gains energy, ice melts + ! -: ML loses energy, ice grows + subli=subli*inv_rhowat ! negative upward + + return end subroutine budget ! -!====================================================================================== ! -subroutine obudget (qa,fsh,flo,t,ug,ta,ch,ce,fh,evap,hflatow,hfsenow,hflwrdout) - ! Ice growth rate for open ocean [m ice/sec] - ! - ! INPUT: - ! t - temperature of open water [C] - ! fsh - shortwave radiation - ! flo - longwave radiation - ! ta - air temperature [C] - ! qa - specific humidity - ! ug - wind speed [m/sec] - ! ch - transfer coefficient for sensible heat - ! ce - transfer coefficient for evaporation - ! - ! OUTPUT: fh - growth rate - ! 
evap - evaporation - - use i_therm_param - use o_param, only: WP - implicit none - - real(kind=WP) qa,t,ta,fsh,flo,ug,ch,ce,fh,evap - real(kind=WP) hfsenow,hfradow,hflatow,hftotow,hflwrdout,b - real(kind=WP) q1, q2 ! coefficients for saturated specific humidity - real(kind=WP) c1, c4, c5 - logical :: standard_saturation_shum_formula = .true. - integer :: ii - - !data c1, c4, c5 /3.8e-3, 17.67, 243.5/ -!!PS data c1, c4, c5 /3.8e-3, 17.27, 237.3/ -!!PS data q1 /640380./, q2 /-5107.4/ - - c1 = 3.8e-3_WP - c4 = 17.27_WP - c5 = 237.3_WP - q1 = 640380._WP - q2 = -5107.4_WP - - ! (saturated) surface specific humidity - if(standard_saturation_shum_formula) then - b=c1*exp(c4*t/(t+c5)) ! a standard one - else - b=0.98_WP*q1*inv_rhoair*exp(q2/(t+tmelt)) ! LY2004 NCAR version - end if - - ! radiation heat fluxe [W/m**2]: - hfradow= (1.0_WP-albw)*fsh & ! absorbed short wave radiation - +flo ! long wave radiation coming in !put emiss/check - hflwrdout=-emiss_wat*boltzmann*((t+tmelt)**4) ! long wave radiation going out !in LY2004 emiss=1 - hfradow=hfradow+hflwrdout - - ! sensible heat fluxe [W/m**2]: - hfsenow=rhoair*cpair*ch*ug*(ta-t) ! sensible heat - - ! latent heat fluxe [W/m**2]: - evap =rhoair*ce*ug*(qa-b) ! evaporation kg/m2/s - hflatow=clhw*evap ! latent heat W/m2 +!_______________________________________________________________________________ +subroutine obudget (ithermp, qa,fsh,flo,t,ug,ta,ch,ce,fh,evap,hflatow,hfsenow,hflwrdout) + ! Ice growth rate for open ocean [m ice/sec] + ! + ! INPUT: + ! t - temperature of open water [C] + ! fsh - shortwave radiation + ! flo - longwave radiation + ! ta - air temperature [C] + ! qa - specific humidity + ! ug - wind speed [m/sec] + ! ch - transfer coefficient for sensible heat + ! ce - transfer coefficient for evaporation + ! + ! OUTPUT: fh - growth rate + ! 
evap - evaporation + use MOD_ICE + use o_param, only: WP + implicit none + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) qa,t,ta,fsh,flo,ug,ch,ce,fh,evap + real(kind=WP) hfsenow,hfradow,hflatow,hftotow,hflwrdout,b + real(kind=WP) q1, q2 ! coefficients for saturated specific humidity + real(kind=WP) c1, c4, c5 + logical :: standard_saturation_shum_formula = .true. + integer :: ii + !___________________________________________________________________________ + real(kind=WP), pointer :: boltzmann, emiss_wat, inv_rhowat, inv_rhoair, rhoair, & + tmelt, cl, clhw, cpair, albw + boltzmann => ithermp%boltzmann + emiss_wat => ithermp%emiss_wat + inv_rhowat => ithermp%inv_rhowat + inv_rhoair => ithermp%inv_rhoair + rhoair => ithermp%rhoair + tmelt => ithermp%tmelt + cl => ithermp%cl + clhw => ithermp%clhw + cpair => ithermp%cpair + albw => ithermp%albw + + !___________________________________________________________________________ + c1 = 3.8e-3_WP + c4 = 17.27_WP + c5 = 237.3_WP + q1 = 640380._WP + q2 = -5107.4_WP + + ! (saturated) surface specific humidity + if(standard_saturation_shum_formula) then + b=c1*exp(c4*t/(t+c5)) ! a standard one + else + b=0.98_WP*q1*inv_rhoair*exp(q2/(t+tmelt)) ! LY2004 NCAR version + end if + + ! radiation heat fluxe [W/m**2]: + hfradow= (1.0_WP-albw)*fsh & ! absorbed short wave radiation + +flo ! long wave radiation coming in !put emiss/check + hflwrdout=-emiss_wat*boltzmann*((t+tmelt)**4) ! long wave radiation going out !in LY2004 emiss=1 + hfradow=hfradow+hflwrdout + + ! sensible heat fluxe [W/m**2]: + hfsenow=rhoair*cpair*ch*ug*(ta-t) ! sensible heat + + ! latent heat fluxe [W/m**2]: + evap =rhoair*ce*ug*(qa-b) ! evaporation kg/m2/s + hflatow=clhw*evap ! latent heat W/m2 - ! total heat fluxe [W/m**2]: - hftotow=hfradow+hfsenow+hflatow ! total heat W/m2 - - fh= -hftotow/cl ! growth rate [m ice/sec] - ! +: ML gains energy, ice melts - ! -: ML loses energy, ice grows - evap=evap*inv_rhowat ! 
evaporation rate [m water/s],negative up !!! + ! total heat fluxe [W/m**2]: + hftotow=hfradow+hfsenow+hflatow ! total heat W/m2 + + fh= -hftotow/cl ! growth rate [m ice/sec] + ! +: ML gains energy, ice melts + ! -: ML loses energy, ice grows + evap=evap*inv_rhowat ! evaporation rate [m water/s],negative up !!! - return + return end subroutine obudget ! -!====================================================================================== ! -subroutine flooding (h,hsn) - use i_therm_param - - real(kind=WP) h,hsn,hdraft,hflood - - hdraft=(rhosno*hsn+h*rhoice)*inv_rhowat ! Archimedes: displaced water - hflood=hdraft-min(hdraft,h) ! Increase in mean ice thickness due to flooding - h=h+hflood ! Add converted snow to ice volume - hsn=hsn-hflood*rhoice*inv_rhosno ! Subtract snow from snow layer - - !RT This is what all AWI sea ice models do, but - !RT I wonder whether it really is correct for the heat budget. - !RT I suggest we initially keep it to allow for a comparison with BRIOS results - !RT and rethink it at a later stage. - - return +!_______________________________________________________________________________ +subroutine flooding (ithermp, h, hsn) + use MOD_ICE + type(t_ice_thermo), intent(in), target :: ithermp + real(kind=WP) h,hsn,hdraft,hflood + !___________________________________________________________________________ + real(kind=WP), pointer :: inv_rhowat, inv_rhosno, rhoice, rhosno + inv_rhowat => ithermp%inv_rhowat + inv_rhosno => ithermp%inv_rhosno + rhoice => ithermp%rhoice + rhosno => ithermp%rhosno + + !___________________________________________________________________________ + hdraft=(rhosno*hsn+h*rhoice)*inv_rhowat ! Archimedes: displaced water + hflood=hdraft-min(hdraft,h) ! Increase in mean ice thickness due to flooding + h=h+hflood ! Add converted snow to ice volume + hsn=hsn-hflood*rhoice*inv_rhosno ! 
Subtract snow from snow layer + + !RT This is what all AWI sea ice models do, but + !RT I wonder whether it really is correct for the heat budget. + !RT I suggest we initially keep it to allow for a comparison with BRIOS results + !RT and rethink it at a later stage. + + return end subroutine flooding ! -!====================================================================================== ! +!_______________________________________________________________________________ function TFrez(S) - ! Nonlinear correlation for the water freezing temperature. - ! Millero (1978) - UNESCO. Reference - See A. Gill, 1982. - use o_param, only: WP - implicit none - real(kind=WP) :: S, TFrez + ! Nonlinear correlation for the water freezing temperature. + ! Millero (1978) - UNESCO. Reference - See A. Gill, 1982. + use o_param, only: WP + implicit none + real(kind=WP) :: S, TFrez - TFrez= -0.0575_WP*S+1.7105e-3_WP *sqrt(S**3)-2.155e-4_WP *S*S + TFrez= -0.0575_WP*S+1.7105e-3_WP *sqrt(S**3)-2.155e-4_WP *S*S end function TFrez ! -!====================================================================================== ! 
-#endif +!_______________________________________________________________________________ +#endif /* #if !defined (__oasis) && !defined (__ifsinterface) */ diff --git a/src/associate_mesh.h b/src/icepack_drivers/associate_mesh.h similarity index 63% rename from src/associate_mesh.h rename to src/icepack_drivers/associate_mesh.h index 554819e88..c9c789c7a 100644 --- a/src/associate_mesh.h +++ b/src/icepack_drivers/associate_mesh.h @@ -1,9 +1,14 @@ -integer , pointer :: nod2D -integer , pointer :: elem2D -integer , pointer :: edge2D +integer , pointer :: nod2D, myDim_nod2D, eDim_nod2D +integer , pointer :: elem2D, myDim_elem2D, eDim_elem2D, eXDim_elem2D +integer , pointer :: edge2D, myDim_edge2D, eDim_edge2D integer , pointer :: edge2D_in real(kind=WP) , pointer :: ocean_area +real(kind=WP) , pointer :: ocean_areawithcav integer , pointer :: nl +integer , pointer :: nn_size + + + real(kind=WP), dimension(:,:), pointer :: coord_nod2D, geo_coord_nod2D integer, dimension(:,:) , pointer :: elem2D_nodes integer, dimension(:,:) , pointer :: edges @@ -22,7 +27,7 @@ real(kind=WP), dimension(:,:), pointer :: gradient_sca integer, dimension(:) , pointer :: bc_index_nod2D real(kind=WP), dimension(:) , pointer :: zbar, Z, elem_depth integer, dimension(:) , pointer :: nlevels, nlevels_nod2D, nlevels_nod2D_min -real(kind=WP), dimension(:,:), pointer :: area, area_inv +real(kind=WP), dimension(:,:), pointer :: area, area_inv, areasvol, areasvol_inv real(kind=WP), dimension(:) , pointer :: mesh_resolution real(kind=WP), dimension(:) , pointer :: lump2d_north, lump2d_south type(sparse_matrix) , pointer :: ssh_stiff @@ -30,51 +35,42 @@ type(sparse_matrix) , pointer :: ssh_stiff integer, dimension(:) , pointer :: cavity_flag_n, cavity_flag_e real(kind=WP), dimension(:) , pointer :: cavity_depth integer, dimension(:) , pointer :: ulevels, ulevels_nod2D, ulevels_nod2D_max +integer, dimension(:) , pointer :: nn_num +integer, dimension(:,:), pointer :: nn_pos + +real(kind=WP), 
dimension(:,:), pointer :: hnode +real(kind=WP), dimension(:,:), pointer :: hnode_new +real(kind=WP), dimension(:,:), pointer :: zbar_3d_n +real(kind=WP), dimension(:,:), pointer :: Z_3d_n +real(kind=WP), dimension(:,:), pointer :: helem +real(kind=WP), dimension(:) , pointer :: bottom_elem_thickness +real(kind=WP), dimension(:) , pointer :: bottom_node_thickness +real(kind=WP), dimension(:) , pointer :: dhe +real(kind=WP), dimension(:) , pointer :: hbar +real(kind=WP), dimension(:) , pointer :: hbar_old +real(kind=WP), dimension(:) , pointer :: zbar_n_bot +real(kind=WP), dimension(:) , pointer :: zbar_e_bot +real(kind=WP), dimension(:) , pointer :: zbar_n_srf +real(kind=WP), dimension(:) , pointer :: zbar_e_srf nod2D => mesh%nod2D elem2D => mesh%elem2D edge2D => mesh%edge2D edge2D_in => mesh%edge2D_in -ocean_area => mesh%ocean_area +ocean_area => mesh%ocean_area +ocean_areawithcav => mesh%ocean_areawithcav nl => mesh%nl +nn_size => mesh%nn_size + + +myDim_nod2D => p_partit%myDim_nod2D +eDim_nod2D => p_partit%eDim_nod2D +myDim_elem2D => p_partit%myDim_elem2D +eDim_elem2D => p_partit%eDim_elem2D +eXDim_elem2D => p_partit%eXDim_elem2D +myDim_edge2D => p_partit%myDim_edge2D +eDim_edge2D => p_partit%eDim_edge2D -!!$coord_nod2D => mesh%coord_nod2D -!!$geo_coord_nod2D => mesh%geo_coord_nod2D -!!$elem2D_nodes => mesh%elem2D_nodes -!!$edges => mesh%edges -!!$edge_tri => mesh%edge_tri -!!$elem_edges => mesh%elem_edges -!!$elem_area => mesh%elem_area -!!$edge_dxdy => mesh%edge_dxdy -!!$edge_cross_dxdy => mesh%edge_cross_dxdy -!!$elem_cos => mesh%elem_cos -!!$metric_factor => mesh%metric_factor -!!$elem_neighbors => mesh%elem_neighbors -!!$nod_in_elem2D => mesh%nod_in_elem2D -!!$x_corners => mesh%x_corners -!!$y_corners => mesh%y_corners -!!$nod_in_elem2D_num => mesh%nod_in_elem2D_num -!!$depth => mesh%depth -!!$gradient_vec => mesh%gradient_vec -!!$gradient_sca => mesh%gradient_sca -!!$bc_index_nod2D => mesh%bc_index_nod2D -!!$zbar => mesh%zbar -!!$Z => mesh%Z -!!$elem_depth 
=> mesh%elem_depth -!!$nlevels => mesh%nlevels -!!$nlevels_nod2D => mesh%nlevels_nod2D -!!$nlevels_nod2D_min => mesh%nlevels_nod2D_min -!!$area => mesh%area -!!$area_inv => mesh%area_inv -!!$mesh_resolution => mesh%mesh_resolution -!!$ssh_stiff => mesh%ssh_stiff -!!$cavity_flag => mesh%cavity_flag -!!$cavity_lev_nod2D => mesh%cavity_lev_nod2D -!!$cavity_lev_elem2D => mesh%cavity_lev_elem2D -!!$cavity_depth => mesh%cavity_depth -!!$ulevels => mesh%ulevels -!!$ulevels_nod2D => mesh%ulevels_nod2D -!!$ulevels_nod2D_max => mesh%ulevels_nod2D_max coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%coord_nod2D geo_coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => mesh%geo_coord_nod2D @@ -82,7 +78,7 @@ elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem2D_nodes edges(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edges edge_tri(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_tri elem_edges(1:3,1:myDim_elem2D) => mesh%elem_edges -elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_area +elem_area(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_area edge_dxdy(1:2,1:myDim_edge2D+eDim_edge2D) => mesh%edge_dxdy edge_cross_dxdy(1:4,1:myDim_edge2D+eDim_edge2D) => mesh%edge_cross_dxdy elem_cos(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%elem_cos @@ -102,8 +98,10 @@ elem_depth => mesh%elem_depth ! 
never used, not even allocated nlevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%nlevels nlevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D nlevels_nod2D_min(1:myDim_nod2D+eDim_nod2D) => mesh%nlevels_nod2D_min -area(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area -area_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area_inv +area(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area +areasvol(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol +area_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%area_inv +areasvol_inv(1:mesh%nl,1:myDim_nod2d+eDim_nod2D) => mesh%areasvol_inv mesh_resolution(1:myDim_nod2d+eDim_nod2D) => mesh%mesh_resolution ssh_stiff => mesh%ssh_stiff lump2d_north(1:myDim_nod2d) => mesh%lump2d_north @@ -116,3 +114,21 @@ cavity_depth(1:myDim_nod2D+eDim_nod2D) => mesh%cavity_depth ulevels(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => mesh%ulevels ulevels_nod2D(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D ulevels_nod2D_max(1:myDim_nod2D+eDim_nod2D) => mesh%ulevels_nod2D_max +nn_num(1:myDim_nod2D) => mesh%nn_num +nn_pos(1:mesh%nn_size, 1:myDim_nod2D) => mesh%nn_pos +hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode +hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new +zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n +Z_3d_n(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%Z_3d_n +helem(1:mesh%nl-1, 1:myDim_elem2D) => mesh%helem +bottom_elem_thickness(1:myDim_elem2D) => mesh%bottom_elem_thickness +bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_thickness +dhe(1:myDim_elem2D) => mesh%dhe +hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar +hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old +zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot +zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot +zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf +zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf + + diff --git a/src/icepack_drivers/icedrv_advection.F90 
b/src/icepack_drivers/icedrv_advection.F90 index c15b3e47c..8c0b72a91 100644 --- a/src/icepack_drivers/icedrv_advection.F90 +++ b/src/icepack_drivers/icedrv_advection.F90 @@ -35,18 +35,17 @@ contains - subroutine tg_rhs_icepack(mesh, trc) + subroutine tg_rhs_icepack(ice, mesh, trc) use mod_mesh - use i_param - use g_parsup + use MOD_ICE use o_param use g_config implicit none ! Input - output - + type(t_ice), target, intent(in) :: ice type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc @@ -57,7 +56,7 @@ subroutine tg_rhs_icepack(mesh, trc) integer(kind=int_kind) :: n, q, row, & elem, elnodes(3) -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Taylor-Galerkin (Lax-Wendroff) rhs @@ -80,14 +79,14 @@ subroutine tg_rhs_icepack(mesh, trc) ! Diffusivity - diff = ice_diff * sqrt( elem_area(elem) / scale_area ) + diff =ice% ice_diff * sqrt( elem_area(elem) / scale_area ) do n = 1, 3 row = elnodes(n) do q = 1, 3 - entries(q) = vol*ice_dt*((dx(n)*(um+uvel(elnodes(q))) + & + entries(q) = vol*ice%ice_dt*((dx(n)*(um+uvel(elnodes(q))) + & dy(n)*(vm+vvel(elnodes(q))))/12.0_WP - & diff*(dx(n)*dx(q)+ dy(n)*dy(q)) - & - 0.5_WP*ice_dt*(um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0_WP) + 0.5_WP*ice%ice_dt*(um*dx(n)+vm*dy(n))*(um*dx(q)+vm*dy(q))/9.0_WP) enddo rhs_tr(row)=rhs_tr(row)+sum(entries*trc(elnodes)) enddo @@ -100,13 +99,13 @@ end subroutine tg_rhs_icepack module subroutine init_advection_icepack(mesh) use o_param - use o_mesh - use g_parsup use mod_mesh implicit none type(t_mesh), intent(in), target :: mesh + +#include "associate_mesh.h" ! Initialization of arrays necessary to implement FCT algorithm allocate(trl(nx)) ! 
low-order solutions @@ -139,10 +138,7 @@ end subroutine init_advection_icepack subroutine fill_mass_matrix_icepack(mesh) use mod_mesh - use o_mesh - use i_param - use g_parsup - + implicit none integer(kind=int_kind) :: n, n1, n2, row @@ -152,7 +148,7 @@ subroutine fill_mass_matrix_icepack(mesh) integer(kind=int_kind) :: flag=0 ,iflag=0 type(t_mesh), intent(in), target :: mesh -#include "../associate_mesh.h" +#include "associate_mesh.h" allocate(col_pos(nx)) @@ -201,7 +197,7 @@ end subroutine fill_mass_matrix_icepack !======================================================================= - subroutine solve_low_order_icepack(mesh, trc) + subroutine solve_low_order_icepack(ice, mesh, trc) !============================ ! Low-order solution @@ -213,23 +209,20 @@ subroutine solve_low_order_icepack(mesh, trc) ! is implemented as the difference between the consistent and lumped mass ! matrices acting on the field from the previous time step. The consistent ! mass matrix on the lhs is replaced with the lumped one. - + USE MOD_ICE use mod_mesh - use o_mesh - use i_param - use g_parsup - - + implicit none integer(kind=int_kind) :: row, clo, clo2, cn, location(100) real (kind=dbl_kind) :: gamma + type(t_ice), target, intent(in) :: ice type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc -#include "../associate_mesh.h" +#include "associate_mesh.h" - gamma = ice_gamma_fct ! Added diffusivity parameter + gamma = ice%ice_gamma_fct ! Added diffusivity parameter ! Adjust it to ensure posivity of solution do row = 1, nx_nh @@ -242,7 +235,7 @@ subroutine solve_low_order_icepack(mesh, trc) (1.0_WP-gamma) * trc(row) enddo - call exchange_nod(trl) + call exchange_nod(trl, p_partit) ! 
Low-order solution must be known to neighbours @@ -253,11 +246,7 @@ end subroutine solve_low_order_icepack subroutine solve_high_order_icepack(mesh, trc) use mod_mesh - use o_mesh - use i_param - use g_parsup - - + implicit none integer(kind=int_kind) :: n,i,clo,clo2,cn,location(100),row @@ -266,7 +255,7 @@ subroutine solve_high_order_icepack(mesh, trc) type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Taylor-Galerkin solution @@ -275,7 +264,7 @@ subroutine solve_high_order_icepack(mesh, trc) d_tr(row) = rhs_tr(row) / area(1,row) end do - call exchange_nod(d_tr) + call exchange_nod(d_tr, p_partit) ! Iterate do n = 1, num_iter_solve - 1 @@ -290,14 +279,14 @@ subroutine solve_high_order_icepack(mesh, trc) do row = 1, nx_nh d_tr(row) = trl(row) enddo - call exchange_nod(d_tr) + call exchange_nod(d_tr, p_partit) enddo end subroutine solve_high_order_icepack !======================================================================= - subroutine fem_fct_icepack(mesh, trc) + subroutine fem_fct_icepack(ice, mesh, trc) !============================ ! Flux corrected transport algorithm for tracer advection @@ -307,23 +296,20 @@ subroutine fem_fct_icepack(mesh, trc) ! transport (FEM-FCT) for the Euler and Navier-Stokes equation, ! Int. J. Numer. Meth. Fluids, 7 (1987), 1093--1109) as described by Kuzmin and ! Turek. (kuzmin@math.uni-dortmund.de) - + USE MOD_ICE use mod_mesh - use o_mesh use o_param - use i_param - use g_parsup - - + integer(kind=int_kind) :: icoef(3,3), n, q, elem, elnodes(3), row real (kind=dbl_kind), allocatable, dimension(:) :: tmax, tmin real (kind=dbl_kind) :: vol, flux, ae, gamma type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc - -#include "../associate_mesh.h" + type(t_ice), target, intent(in) :: ice + +#include "associate_mesh.h" - gamma = ice_gamma_fct ! 
It should coinside with gamma in + gamma = ice%ice_gamma_fct ! It should coinside with gamma in ! ts_solve_low_order !========================== @@ -415,7 +401,7 @@ subroutine fem_fct_icepack(mesh, trc) enddo ! pminus and pplus are to be known to neighbouting PE - call exchange_nod(icepminus, icepplus) + call exchange_nod(icepminus, icepplus, p_partit) !======================== ! Limiting @@ -449,7 +435,7 @@ subroutine fem_fct_icepack(mesh, trc) enddo enddo - call exchange_nod(trc) + call exchange_nod(trc, p_partit) deallocate(tmin, tmax) @@ -457,24 +443,21 @@ end subroutine fem_fct_icepack !======================================================================= - subroutine tg_rhs_div_icepack(mesh, trc) - + subroutine tg_rhs_div_icepack(ice, mesh, trc) + USE MOD_ICE use mod_mesh - use o_mesh use o_param - use i_param - use g_parsup - - + implicit none real (kind=dbl_kind) :: diff, entries(3), um, vm, vol, dx(3), dy(3) integer(kind=int_kind) :: n, q, row, elem, elnodes(3) real (kind=dbl_kind) :: c_1, c_2, c_3, c_4, c_x, entries2(3) + type(t_ice), target, intent(in) :: ice type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Computes the rhs in a Taylor-Galerkin way (with urrayspwind ! type of correction for the advection operator). 
@@ -509,14 +492,14 @@ subroutine tg_rhs_div_icepack(mesh, trc) row = elnodes(n) do q = 1, 3 - entries(q) = vol*ice_dt*((c1-p5*ice_dt*c_4)*(dx(n)*(um+uvel(elnodes(q)))+ & + entries(q) = vol*ice%ice_dt*((c1-p5*ice%ice_dt*c_4)*(dx(n)*(um+uvel(elnodes(q)))+ & dy(n)*(vm+vvel(elnodes(q))))/12.0_dbl_kind - & - p5*ice_dt*(c_1*dx(n)*dx(q)+c_2*dy(n)*dy(q)+c_3*(dx(n)*dy(q)+dx(q)*dy(n)))) - entries2(q) = p5*ice_dt*(dx(n)*(um+uvel(elnodes(q))) + & + p5*ice%ice_dt*(c_1*dx(n)*dx(q)+c_2*dy(n)*dy(q)+c_3*(dx(n)*dy(q)+dx(q)*dy(n)))) + entries2(q) = p5*ice%ice_dt*(dx(n)*(um+uvel(elnodes(q))) + & dy(n)*(vm+vvel(elnodes(q)))-dx(q)*(um+uvel(row)) - & dy(q)*(vm+vvel(row))) enddo - c_x = vol*ice_dt*c_4*(sum(trc(elnodes))+trc(elnodes(n))+sum(entries2*trc(elnodes))) / 12.0_dbl_kind + c_x = vol*ice%ice_dt*c_4*(sum(trc(elnodes))+trc(elnodes(n))+sum(entries2*trc(elnodes))) / 12.0_dbl_kind rhs_tr(row) = rhs_tr(row) + sum(entries * trc(elnodes)) + c_x rhs_trdiv(row) = rhs_trdiv(row) - c_x enddo @@ -529,12 +512,8 @@ end subroutine tg_rhs_div_icepack subroutine update_for_div_icepack(mesh, trc) use mod_mesh - use o_mesh use o_param - use i_param - use g_parsup - - + implicit none integer(kind=int_kind) :: n, i, clo, clo2, cn, & @@ -544,7 +523,7 @@ subroutine update_for_div_icepack(mesh, trc) type(t_mesh), target, intent(in) :: mesh real(kind=dbl_kind), dimension(nx), intent(inout) :: trc -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Computes Taylor-Galerkin solution ! first approximation @@ -553,7 +532,7 @@ subroutine update_for_div_icepack(mesh, trc) d_tr(row) = rhs_trdiv(row) / area(1,row) enddo - call exchange_nod(d_tr) + call exchange_nod(d_tr, p_partit) ! 
Iterate @@ -569,7 +548,7 @@ subroutine update_for_div_icepack(mesh, trc) do row = 1, nx_nh d_tr(row) = trl(row) enddo - call exchange_nod(d_tr) + call exchange_nod(d_tr, p_partit) enddo trc = trc + d_tr @@ -578,31 +557,33 @@ end subroutine update_for_div_icepack !======================================================================= - subroutine fct_solve_icepack(mesh, trc) + subroutine fct_solve_icepack(ice, mesh, trc) use mod_mesh - + use MOD_ICE implicit none real(kind=dbl_kind), dimension(nx), intent(inout) :: trc type(t_mesh), target, intent(in) :: mesh + type(t_ice), target, intent(in) :: ice ! Driving sequence - call tg_rhs_div_icepack(mesh, trc) + call tg_rhs_div_icepack(ice, mesh, trc) call solve_high_order_icepack(mesh, trc) ! uses arrays of low-order solutions as temp ! storage. It should preceed the call of low ! order solution. - call solve_low_order_icepack(mesh, trc) - call fem_fct_icepack(mesh, trc) + call solve_low_order_icepack(ice, mesh, trc) + call fem_fct_icepack(ice, mesh, trc) call update_for_div_icepack(mesh, trc) end subroutine fct_solve_icepack !======================================================================= - module subroutine tracer_advection_icepack(mesh) + module subroutine tracer_advection_icepack(ice, mesh) use mod_mesh + use MOD_ICE use icepack_intfc, only: icepack_aggregate use icepack_itd, only: cleanup_itd use g_config, only: dt @@ -632,6 +613,7 @@ module subroutine tracer_advection_icepack(mesh) works type(t_mesh), target, intent(in) :: mesh + type(t_ice), target, intent(in) :: ice call icepack_query_parameters(heat_capacity_out=heat_capacity, & puny_out=puny) @@ -661,7 +643,7 @@ module subroutine tracer_advection_icepack(mesh) ! 
Advect each tracer do nt = 1, narr - call fct_solve_icepack ( mesh, works(:,nt) ) + call fct_solve_icepack (ice, mesh, works(:,nt) ) end do call work_to_state (ntrcr, narr, works(:,:)) diff --git a/src/icepack_drivers/icedrv_init.F90 b/src/icepack_drivers/icedrv_init.F90 index aaea17469..9294e424c 100644 --- a/src/icepack_drivers/icedrv_init.F90 +++ b/src/icepack_drivers/icedrv_init.F90 @@ -27,10 +27,10 @@ contains - subroutine init_state() + subroutine init_state(tracer) use icepack_intfc, only: icepack_aggregate - + use mod_tracer implicit none integer (kind=int_kind) :: & @@ -49,7 +49,8 @@ subroutine init_state() nt_ipnd, nt_aero, nt_fsd character(len=*), parameter :: subname='(init_state)' - + type(t_tracer_data), intent(in), target :: tracer + !----------------------------------------------------------------- ! query Icepack values !----------------------------------------------------------------- @@ -199,7 +200,7 @@ subroutine init_state() ! Set state variables !----------------------------------------------------------------- - call init_state_var() + call init_state_var(tracer) end subroutine init_state @@ -909,7 +910,7 @@ end subroutine init_faero !======================================================================= - module subroutine init_icepack(mesh) + module subroutine init_icepack(ice, tracer, mesh) use icepack_intfc, only: icepack_init_itd use icepack_intfc, only: icepack_init_itd_hist @@ -917,6 +918,8 @@ module subroutine init_icepack(mesh) use icepack_intfc, only: icepack_init_fsd_bounds use icepack_intfc, only: icepack_warnings_flush use mod_mesh + use mod_ice + use mod_tracer implicit none @@ -926,8 +929,9 @@ module subroutine init_icepack(mesh) tr_fsd, & ! from icepack wave_spec ! 
from icepack character(len=*), parameter :: subname='(icedrv_initialize)' - type(t_mesh), intent(in), target :: mesh - + type(t_mesh), intent(in), target :: mesh + type(t_tracer_data), intent(in), target :: tracer + type(t_ice), intent(inout), target :: ice call icepack_query_parameters(wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_aero_out=tr_aero) call icepack_query_tracer_flags(tr_zaero_out=tr_zaero) @@ -978,8 +982,8 @@ module subroutine init_icepack(mesh) endif call init_fsd - call fesom_to_icepack(mesh) - call init_state ! initialize the ice state + call fesom_to_icepack(ice, mesh) + call init_state(tracer) ! initialize the ice state call init_history_therm ! initialize thermo history variables if (tr_fsd .and. wave_spec) call init_wave_spec ! wave spectrum in ice @@ -996,15 +1000,17 @@ end subroutine init_icepack !======================================================================= - subroutine init_state_var () + subroutine init_state_var (tracer) use icepack_intfc, only: icepack_init_fsd use icepack_intfc, only: icepack_aggregate - use o_arrays, only: tr_arr + use mod_tracer implicit none ! local variables - + type(t_tracer_data), intent(in), target :: tracer + real(kind=WP), dimension(:,:), pointer :: tr_arr + integer (kind=int_kind) :: & i , & ! horizontal indices k , & ! ice layer index @@ -1033,7 +1039,7 @@ subroutine init_state_var () character(len=char_len_long), parameter :: ice_ic='default' character(len=*), parameter :: subname='(set_state_var)' - + tr_arr=>tracer%values(:,:) !----------------------------------------------------------------- ! query Icepack values !----------------------------------------------------------------- @@ -1103,7 +1109,7 @@ subroutine init_state_var () enddo do i = 1, nx - if (tr_arr(1,i,1) < 0.0_dbl_kind) then ! + if (tr_arr(1,i) < 0.0_dbl_kind) then ! do n = 1, ncat ! 
ice volume, snow volume aicen(i,n) = ainit(n) diff --git a/src/icepack_drivers/icedrv_io.F90 b/src/icepack_drivers/icedrv_io.F90 index cb07079f4..7d8bb7c4d 100644 --- a/src/icepack_drivers/icedrv_io.F90 +++ b/src/icepack_drivers/icedrv_io.F90 @@ -21,7 +21,6 @@ module subroutine init_io_icepack(mesh) use mod_mesh - use g_parsup use io_meandata, only: def_stream3D, def_stream2D implicit none @@ -64,7 +63,7 @@ module subroutine init_io_icepack(mesh) namelist /nml_listsize / io_listsize namelist /nml_list_icepack / io_list_icepack -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Get the tracers information from icepack call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & @@ -95,7 +94,7 @@ module subroutine init_io_icepack(mesh) if (mype==0) write(*,*) ' file : ', 'namelist.io',' open ok' else if (mype==0) write(*,*) 'ERROR: --> bad opening file : ','namelist.io',' ; iostat=',iost - call par_ex + call par_ex(p_partit%MPI_COMM_FESOM, p_partit%mype) stop end if open( unit=nm_icepack_unit, file='namelist.icepack', form='formatted', access='sequential', status='old', iostat=iost ) @@ -103,7 +102,7 @@ module subroutine init_io_icepack(mesh) if (mype==0) write(*,*) ' file : ', 'namelist.icepack',' open ok' else if (mype==0) write(*,*) 'ERROR: --> bad opening file : ','namelist.icepack',' ; iostat=',iost - call par_ex + call par_ex(p_partit%MPI_COMM_FESOM, p_partit%mype) stop end if @@ -124,147 +123,147 @@ module subroutine init_io_icepack(mesh) do i=1, io_listsize select case (trim(io_list_icepack(i)%id)) case ('aice0 ') - call def_stream2D(nod2D, nx_nh, 'aice0', 'open water fraction', 'none', aice0(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'aice0', 'open water fraction', 'none', aice0(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('aicen ') - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 
'aicen', 'sea ice concentration', 'none', aicen(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'aicen', 'sea ice concentration', 'none', aicen(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) case ('vicen ') - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vicen', 'volume per unit area of ice', 'm', vicen(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vicen', 'volume per unit area of ice', 'm', vicen(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) case ('vsnon ') - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vsnon', 'volume per unit area of snow', 'm', vsnon(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vsnon', 'volume per unit area of snow', 'm', vsnon(:,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) 
case ('aice ') - call def_stream2D(nod2D, nx_nh, 'aice', 'sea ice concentration', 'none', aice(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'aice', 'sea ice concentration', 'none', aice(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('vice ') - call def_stream2D(nod2D, nx_nh, 'vice', 'volume per unit area of ice', 'm', vice(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'vice', 'volume per unit area of ice', 'm', vice(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('vsno ') - call def_stream2D(nod2D, nx_nh, 'vsno', 'volume per unit area of snow', 'm', vsno(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'vsno', 'volume per unit area of snow', 'm', vsno(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) ! Sea ice velocity components case ('uvel ') - call def_stream2D(nod2D, nx_nh, 'uvel', 'x-component of ice velocity', 'm/s', uvel(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'uvel', 'x-component of ice velocity', 'm/s', uvel(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('vvel ') - call def_stream2D(nod2D, nx_nh, 'vvel', 'y-component of ice velocity', 'm/s', vvel(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'vvel', 'y-component of ice velocity', 'm/s', vvel(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) ! Sea ice or snow surface temperature case ('Tsfc ') - call def_stream2D(nod2D, nx_nh, 'Tsfc', 'sea ice surf. 
temperature', 'degC', trcr(:,nt_Tsfc), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'Tsfc', 'sea ice surf. temperature', 'degC', trcr(:,nt_Tsfc), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('Tsfcn ') - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'Tsfcn', 'sea ice surf. temperature', 'degC', trcrn(:,nt_Tsfc,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'Tsfcn', 'sea ice surf. temperature', 'degC', trcrn(:,nt_Tsfc,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('strength ') - call def_stream2D(nod2D, nx_nh, 'strength', 'sea ice strength', 'N', strength(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'strength', 'sea ice strength', 'N', strength(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) ! If the following tracers are not defined they will not be outputed case ('iagen ') if (tr_iage) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'iage', 'sea ice age', 's', trcrn(:,nt_iage,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'iage', 'sea ice age', 's', trcrn(:,nt_iage,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('FYn ') if (tr_FY) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'FY', 'first year ice', 'none', trcrn(:,nt_FY,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) 
+ call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'FY', 'first year ice', 'none', trcrn(:,nt_FY,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('lvln ') if (tr_lvl) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'alvl', 'ridged sea ice area', 'none', trcrn(:,nt_alvl,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vlvl', 'ridged sea ice volume', 'm', trcrn(:,nt_vlvl,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'alvl', 'ridged sea ice area', 'none', trcrn(:,nt_alvl,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'vlvl', 'ridged sea ice volume', 'm', trcrn(:,nt_vlvl,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('pond_cesmn') if (tr_pond_cesm) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) 
end if case ('pond_topon') if (tr_pond_topo) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'ipnd', 'melt pond refrozen lid thickness', 'm', trcrn(:,nt_ipnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'ipnd', 'melt pond refrozen lid thickness', 'm', trcrn(:,nt_ipnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('pond_lvln ') if (tr_pond_lvl) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) 
- call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'ipnd', 'melt pond refrozen lid thickness', 'm', trcrn(:,nt_ipnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'apnd', 'melt pond area fraction', 'none', trcrn(:,nt_apnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'hpnd', 'melt pond depth', 'm', trcrn(:,nt_hpnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'ipnd', 'melt pond refrozen lid thickness', 'm', trcrn(:,nt_ipnd,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('brinen ') if (tr_brine) then - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'fbri', 'volume fraction of ice with dynamic salt', 'none', trcrn(:,nt_fbri,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), 'fbri', 'volume fraction of ice with dynamic salt', 'none', trcrn(:,nt_fbri,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end if case ('qicen ') do k = 1,nilyr ! Separate variable for each sea ice layer write(trname,'(A6,i1)') 'qicen_', k write(longname,'(A22,i1)') 'sea ice enthalpy lyr: ', k units='J/m3' - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_qice+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) 
+ call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_qice+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end do case ('sicen ') do k = 1,nilyr ! Separate variable for each sea ice layer write(trname,'(A6,i1)') 'sicen_', k write(longname,'(A22,i1)') 'sea ice salinity lyr: ', k units='psu' - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_sice+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_sice+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end do case ('qsnon ') do k = 1,nslyr ! Separate variable for each snow layer write(trname,'(A6,i1)') 'qsnon_', k write(longname,'(A19,i1)') 'snow enthalpy lyr: ', k units='J/m3' - call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_qsno+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh, .true.) + call def_stream3D((/ncat, nod2D/), (/ncat, nx_nh/), trim(trname), trim(longname), trim(units), trcrn(:,nt_qsno+k-1,:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh, .true.) end do ! 
Average over categories case ('iage ') if (tr_iage) then - call def_stream2D(nod2D, nx_nh, 'iage', 'sea ice age', 's', trcr(:,nt_iage), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'iage', 'sea ice age', 's', trcr(:,nt_iage), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('FY ') if (tr_FY) then - call def_stream2D(nod2D, nx_nh, 'FY', 'first year ice', 'none', trcr(:,nt_FY), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'FY', 'first year ice', 'none', trcr(:,nt_FY), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('lvl ') if (tr_lvl) then - call def_stream2D(nod2D, nx_nh, 'alvl', 'ridged sea ice area', 'none', trcr(:,nt_alvl), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - call def_stream2D(nod2D, nx_nh, 'vlvl', 'ridged sea ice volume', 'm', trcr(:,nt_vlvl), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'alvl', 'ridged sea ice area', 'none', trcr(:,nt_alvl), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + call def_stream2D(nod2D, nx_nh, 'vlvl', 'ridged sea ice volume', 'm', trcr(:,nt_vlvl), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('pond_cesm ') if (tr_pond_cesm) then - call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area 
fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('pond_topo ') if (tr_pond_topo) then - call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - call def_stream2D(nod2D, nx_nh, 'ipnd', 'melt pond refrozen lid thickness', 'm', trcr(:,nt_ipnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + call def_stream2D(nod2D, nx_nh, 'ipnd', 'melt pond refrozen lid thickness', 'm', trcr(:,nt_ipnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('pond_lvl ') if (tr_pond_lvl) then - call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) - !call def_stream2D(nod2D, nx_nh, 'ipnd', 'melt pond refrozen lid thickness', 'm', trcr(:,nt_ipnd), io_list_icepack(i)%freq, 
io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'apnd', 'melt pond area fraction', 'none', trcr(:,nt_apnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + call def_stream2D(nod2D, nx_nh, 'hpnd', 'melt pond depth', 'm', trcr(:,nt_hpnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) + !call def_stream2D(nod2D, nx_nh, 'ipnd', 'melt pond refrozen lid thickness', 'm', trcr(:,nt_ipnd), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('brine ') if (tr_brine) then - call def_stream2D(nod2D, nx_nh, 'fbri', 'volume fraction of ice with dynamic salt', 'none', trcr(:,nt_fbri), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'fbri', 'volume fraction of ice with dynamic salt', 'none', trcr(:,nt_fbri), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end if case ('qice ') do k = 1,nilyr ! Separate variable for each sea ice layer write(trname,'(A6,i1)') 'qicen_', k write(longname,'(A22,i1)') 'sea ice enthalpy lyr: ', k units='J/m3' - call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_qice+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_qice+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end do case ('sice ') do k = 1,nilyr ! 
Separate variable for each sea ice layer write(trname,'(A6,i1)') 'sicen_', k write(longname,'(A22,i1)') 'sea ice salinity lyr: ', k units='psu' - call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_sice+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_sice+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end do case ('qsno ') do k = 1,nslyr ! Separate variable for each snow layer write(trname,'(A6,i1)') 'qsnon_', k write(longname,'(A19,i1)') 'snow enthalpy lyr: ', k units='J/m3' - call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_qsno+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, trim(trname), trim(longname), trim(units), trcr(:,nt_qsno+k-1), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) end do case ('rdg_conv ') - call def_stream2D(nod2D, nx_nh, 'rdg_conv', 'Convergence term for ridging', '1/s', rdg_conv(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'rdg_conv', 'Convergence term for ridging', '1/s', rdg_conv(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case ('rdg_shear ') - call def_stream2D(nod2D, nx_nh, 'rdg_shear', 'Shear term for ridging', '1/s', rdg_shear(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, mesh) + call def_stream2D(nod2D, nx_nh, 'rdg_shear', 'Shear term for ridging', '1/s', rdg_shear(:), io_list_icepack(i)%freq, io_list_icepack(i)%unit, io_list_icepack(i)%precision, p_partit, mesh) case default if (mype==0) write(*,*) 'stream ', io_list_icepack(i)%id, ' is not defined !' 
end select @@ -279,7 +278,6 @@ end subroutine init_io_icepack module subroutine init_restart_icepack(year, mesh) use mod_mesh - use g_parsup use g_config, only: runid, ResultPath use io_restart, only: ip_id, def_variable_2d, def_dim @@ -311,7 +309,7 @@ module subroutine init_restart_icepack(year, mesh) tr_zaero, tr_bgc_Fe, & tr_bgc_hum -#include "../associate_mesh.h" +#include "associate_mesh.h" ! Get the tracers information from icepack call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & diff --git a/src/icepack_drivers/icedrv_main.F90 b/src/icepack_drivers/icedrv_main.F90 index 2daf1ea91..0f5f938ff 100644 --- a/src/icepack_drivers/icedrv_main.F90 +++ b/src/icepack_drivers/icedrv_main.F90 @@ -10,7 +10,7 @@ module icedrv_main use icedrv_kinds use icedrv_constants - use g_parsup, only: mype + use mod_partit implicit none @@ -64,6 +64,8 @@ module icedrv_main integer (kind=int_kind), save :: max_ntrcr ! number of tracers in total integer (kind=int_kind), save :: nfreq ! number of wave frequencies ! HARDWIRED FOR NOW integer (kind=int_kind), save :: ndtd ! dynamic time steps per thermodynamic time step + type(t_partit), pointer, save :: p_partit ! a pointer to the mesh partitioning (has been accessed via "use g_parsup" in the previous versions) + integer (kind=int_kind), save :: mype ! a copy of a mype which has been accessed via "use g_parsup" in the previous versions !======================================================================= ! 2. State variabels for icepack @@ -751,8 +753,12 @@ module icedrv_main interface ! Read icepack namelists, setup the model parameter and write diagnostics - module subroutine set_icepack() + module subroutine set_icepack(ice, partit) + use mod_partit + use mod_ice implicit none + type(t_partit), intent(inout), target :: partit + type(t_ice) , intent(inout), target :: ice end subroutine set_icepack ! 
Set up hemispheric masks @@ -788,17 +794,23 @@ module subroutine init_history_bgc() end subroutine init_history_bgc ! Initialize all - module subroutine init_icepack(mesh) + module subroutine init_icepack(ice, tracer, mesh) use mod_mesh + use mod_tracer + use mod_ice implicit none - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_tracer_data), intent(in), target :: tracer + type(t_ice) , intent(inout), target :: ice end subroutine init_icepack ! Copy variables from fesom to icepack - module subroutine fesom_to_icepack(mesh) + module subroutine fesom_to_icepack(ice, mesh) use mod_mesh + use mod_ice implicit none type(t_mesh), intent(in), target :: mesh + type(t_ice), intent(inout), target :: ice end subroutine fesom_to_icepack ! Copy variables from icepack to fesom @@ -840,10 +852,12 @@ module subroutine icepack_to_fesom_single_point( & end subroutine icepack_to_fesom_single_point ! Trancers advection - module subroutine tracer_advection_icepack(mesh) + module subroutine tracer_advection_icepack(ice, mesh) use mod_mesh + use MOD_ICE implicit none type(t_mesh), intent(in), target :: mesh + type(t_ice), intent(in), target :: ice end subroutine tracer_advection_icepack ! Advection initialization @@ -854,11 +868,10 @@ module subroutine init_advection_icepack(mesh) end subroutine init_advection_icepack ! Driving subroutine for column physics - module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) + module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) use mod_mesh + use mod_ice use g_config, only: dt - use i_PARAM, only: whichEVP - use g_parsup use icepack_intfc, only: icepack_ice_strength implicit none real (kind=dbl_kind), intent(out) :: & @@ -866,6 +879,7 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) time_advec, & time_evp type(t_mesh), intent(in), target :: mesh + type(t_ice), intent(inout), target :: ice end subroutine step_icepack ! 
Initialize output diff --git a/src/icepack_drivers/icedrv_set.F90 b/src/icepack_drivers/icedrv_set.F90 index 4638073f3..dbfc55a1d 100644 --- a/src/icepack_drivers/icedrv_set.F90 +++ b/src/icepack_drivers/icedrv_set.F90 @@ -18,22 +18,21 @@ use icepack_intfc, only: icepack_query_tracer_indices use icepack_intfc, only: icepack_warnings_flush use icepack_intfc, only: icepack_warnings_aborted - use icedrv_system, only: icedrv_system_abort + use icedrv_system, only: icedrv_system_abort, icedrv_system_init contains - module subroutine set_icepack() - - use g_parsup, only: myDim_nod2D, eDim_nod2D, & - myDim_elem2D, eDim_elem2D, & - mpi_comm_fesom - use i_param, only: whichEVP - use i_param, only: cd_oce_ice, Pstar, c_pressure - use i_therm_param, only: albw + module subroutine set_icepack(ice, partit) + use MOD_ICE +! use i_param, only: whichEVP +! use i_param, only: cd_oce_ice, Pstar, c_pressure +! use i_therm_param, only: albw implicit none ! local variables + type(t_partit), intent(inout), target :: partit + type(t_ice), intent(inout), target :: ice character(len=char_len) :: nml_filename, diag_filename character(len=*), parameter :: subname = '(set_icepack)' @@ -256,12 +255,13 @@ module subroutine set_icepack() !----------------------------------------------------------------- ! Derived quantities used by the icepack model !----------------------------------------------------------------- - - nx = myDim_nod2D + eDim_nod2D - nx_elem = myDim_elem2D + eDim_elem2D - nx_nh = myDim_nod2D - nx_elem_nh = myDim_elem2D - + call icedrv_system_init(partit) + p_partit => partit + nx = p_partit%myDim_nod2D + p_partit%eDim_nod2D + nx_elem = p_partit%myDim_elem2D + p_partit%eDim_elem2D + nx_nh = p_partit%myDim_nod2D + nx_elem_nh = p_partit%myDim_elem2D + mype = p_partit%mype ncat = nicecat ! number of categories nfsd = nfsdcat ! number of floe size categories nilyr = nicelyr ! 
number of ice layers per category @@ -433,13 +433,13 @@ module subroutine set_icepack() if (mype == 0) write(nu_diag,*) '-----------------------------------' if (mype == 0) write(nu_diag,*) ' ' - if (whichEVP == 1 .or. whichEVP == 2) then + if (ice%whichEVP == 1 .or. ice%whichEVP == 2) then if (mype == 0) write (nu_diag,*) 'WARNING: whichEVP = 1 or 2' if (mype == 0) write (nu_diag,*) 'Adaptive or Modified EVP formulations' if (mype == 0) write (nu_diag,*) 'are not allowed when using Icepack (yet).' if (mype == 0) write (nu_diag,*) 'Standard EVP will be used instead' if (mype == 0) write (nu_diag,*) ' whichEVP = 0' - whichEVP = 0 + ice%whichEVP = 0 endif if (ncat == 1 .and. kitd == 1) then @@ -818,10 +818,10 @@ module subroutine set_icepack() ! Make the namelists.ice and namelist.icepack consistent (icepack wins ! over fesom) - cd_oce_ice = dragio - albw = albocn - Pstar = P_star - c_pressure = C_star + ice%cd_oce_ice = dragio + ice%thermo%albw= albocn + ice%Pstar = P_star + ice%c_pressure = C_star call icepack_init_parameters(ustar_min_in=ustar_min, Cf_in=Cf, & @@ -870,7 +870,7 @@ module subroutine set_icepack() if (icepack_warnings_aborted()) call icedrv_system_abort(string=subname, & file=__FILE__,line= __LINE__) - call mpi_barrier(mpi_comm_fesom,mpi_error) + call mpi_barrier(p_partit%mpi_comm_fesom, mpi_error) end subroutine set_icepack diff --git a/src/icepack_drivers/icedrv_step.F90 b/src/icepack_drivers/icedrv_step.F90 index ed5d047eb..5139454bf 100644 --- a/src/icepack_drivers/icedrv_step.F90 +++ b/src/icepack_drivers/icedrv_step.F90 @@ -1116,14 +1116,14 @@ end subroutine coupling_prep !======================================================================= - module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) + module subroutine step_icepack(ice, mesh, time_evp, time_advec, time_therm) use icepack_intfc, only: icepack_ice_strength use g_config, only: dt - use i_PARAM, only: whichEVP - use g_parsup use mod_mesh - + use mod_ice + use 
ice_EVPdynamics_interface + use ice_maEVPdynamics_interface implicit none integer (kind=int_kind) :: & @@ -1141,9 +1141,10 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) time_therm, & time_advec, & time_evp - + + type(t_ice), target, intent(inout) :: ice type(t_mesh), target, intent(in) :: mesh - + character(len=*), parameter :: subname='(ice_step)' @@ -1172,7 +1173,7 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) ! copy variables from fesom2 (also ice velocities) !----------------------------------------------------------------- - call fesom_to_icepack(mesh) + call fesom_to_icepack(ice, mesh) !----------------------------------------------------------------- ! tendencies needed by fesom @@ -1238,16 +1239,16 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) t2 = MPI_Wtime() - select case (whichEVP) + select case (ice%whichEVP) case (0) - call EVPdynamics(mesh) + call EVPdynamics (ice, p_partit, mesh) case (1) - call EVPdynamics_m(mesh) + call EVPdynamics_m(ice, p_partit, mesh) case (2) - call EVPdynamics_a(mesh) + call EVPdynamics_a(ice, p_partit, mesh) case default if (mype==0) write(*,*) 'A non existing EVP scheme specified!' - call par_ex + call par_ex(p_partit%MPI_COMM_FESOM, p_partit%mype) stop end select @@ -1258,7 +1259,7 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) ! update ice velocities !----------------------------------------------------------------- - call fesom_to_icepack(mesh) + call fesom_to_icepack(ice, mesh) !----------------------------------------------------------------- ! 
advect tracers @@ -1266,7 +1267,7 @@ module subroutine step_icepack(mesh, time_evp, time_advec, time_therm) t2 = MPI_Wtime() - call tracer_advection_icepack(mesh) + call tracer_advection_icepack(ice, mesh) t3 = MPI_Wtime() time_advec = t3 - t2 diff --git a/src/icepack_drivers/icedrv_system.F90 b/src/icepack_drivers/icedrv_system.F90 index 8a130bbd7..40ce82251 100644 --- a/src/icepack_drivers/icedrv_system.F90 +++ b/src/icepack_drivers/icedrv_system.F90 @@ -7,16 +7,16 @@ module icedrv_system use icedrv_kinds - use g_parsup, only: par_ex + use mod_parsup, only: par_ex use icedrv_constants, only: ice_stderr use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted - + use mod_partit implicit none - public :: icedrv_system_abort + public :: icedrv_system_abort, icedrv_system_init private - + type(t_partit), save, pointer :: p_partit ! a pointer to the mesh partitioning (has been accessed via "use g_parsup" in the original code) !======================================================================= contains @@ -49,12 +49,21 @@ subroutine icedrv_system_abort(icell, istep, string, file, line) ! Stop FESOM2 - call par_ex(1) + call par_ex(p_partit%MPI_COMM_FESOM, p_partit%mype, 1) stop end subroutine icedrv_system_abort !======================================================================= + subroutine icedrv_system_init(partit) + implicit none + type(t_partit), intent(inout), target :: partit + + p_partit => partit + end subroutine icedrv_system_init + +!======================================================================= + end module icedrv_system diff --git a/src/icepack_drivers/icedrv_transfer.F90 b/src/icepack_drivers/icedrv_transfer.F90 index 6ba70afa6..15d36eb1b 100644 --- a/src/icepack_drivers/icedrv_transfer.F90 +++ b/src/icepack_drivers/icedrv_transfer.F90 @@ -10,7 +10,7 @@ contains - module subroutine fesom_to_icepack(mesh) + module subroutine fesom_to_icepack(ice, mesh) use g_forcing_arrays, only: Tair, shum, u_wind, v_wind, & ! 
Atmospheric forcing fields shortwave, longwave, prec_rain, & @@ -18,11 +18,12 @@ module subroutine fesom_to_icepack(mesh) use g_forcing_param, only: ncar_bulk_z_wind, ncar_bulk_z_tair, & ncar_bulk_z_shum use g_sbf, only: l_mslp - use i_arrays, only: S_oc_array, T_oc_array, & ! Ocean and sea ice fields - u_w, v_w, & - u_ice, v_ice, & - stress_atmice_x, stress_atmice_y - use i_param, only: cd_oce_ice ! Sea ice parameters +! use i_arrays, only: S_oc_array, T_oc_array, & ! Ocean and sea ice fields +! u_w, v_w, & +! stress_atmice_x, stress_atmice_y +! ! u_ice, v_ice, & + +! use i_param, only: cd_oce_ice ! Sea ice parameters use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters use icepack_intfc, only: icepack_sea_freezing_temperature @@ -31,8 +32,8 @@ module subroutine fesom_to_icepack(mesh) use g_config, only: dt use o_param, only: mstep use mod_mesh - use o_mesh - use g_parsup + use mod_tracer + use mod_ice use g_clock implicit none @@ -61,9 +62,18 @@ module subroutine fesom_to_icepack(mesh) cprho type(t_mesh), target, intent(in) :: mesh - -#include "../associate_mesh.h" - + type(t_ice), target, intent(inout) :: ice + real(kind=WP), dimension(:), pointer :: u_ice, v_ice, S_oc_array, T_oc_array, & + u_w, v_w, stress_atmice_x, stress_atmice_y +#include "associate_mesh.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + S_oc_array => ice%srfoce_salt(:) + T_oc_array => ice%srfoce_temp(:) + u_w => ice%srfoce_u(:) + v_w => ice%srfoce_v(:) + stress_atmice_x => ice%stress_atmice_x(:) + stress_atmice_y => ice%stress_atmice_y(:) ! Ice uvel(:) = u_ice(:) @@ -121,7 +131,7 @@ module subroutine fesom_to_icepack(mesh) do i = 1, nx ! ocean - ice stress - aux = sqrt((uvel(i)-uocn(i))**2+(vvel(i)-vocn(i))**2)*rhowat*cd_oce_ice + aux = sqrt((uvel(i)-uocn(i))**2+(vvel(i)-vocn(i))**2)*rhowat*ice%cd_oce_ice strocnxT(i) = aux*(uvel(i) - uocn(i)) strocnyT(i) = aux*(vvel(i) - vocn(i)) ! 
freezing - melting potential @@ -145,7 +155,7 @@ module subroutine fesom_to_icepack(mesh) rdg_shear(i) = ty / tvol enddo - call exchange_nod(rdg_conv, rdg_shear) + call exchange_nod(rdg_conv, rdg_shear, p_partit) ! Clock variables diff --git a/src/ifs_interface/ifs_interface.F90 b/src/ifs_interface/ifs_interface.F90 new file mode 100644 index 000000000..d9a6fc09a --- /dev/null +++ b/src/ifs_interface/ifs_interface.F90 @@ -0,0 +1,1531 @@ +!===================================================== +! IFS interface for calling FESOM2 as a subroutine. +! +! -Original code for NEMO by Kristian Mogensen, ECMWF. +! -Adapted to FESOM2 by Thomas Rackow, AWI, 2018. +!----------------------------------------------------- + +MODULE nemogcmcoup_steps + INTEGER :: substeps !per IFS timestep +END MODULE nemogcmcoup_steps + +SUBROUTINE nemogcmcoup_init( mype, icomm, inidate, initime, itini, itend, zstp, & + & lwaveonly, iatmunit, lwrite ) + + ! Initialize the FESOM model for single executable coupling + + USE par_kind !in ifs_modules.F90 + USE fesom_main_storage_module, only: fesom => f ! only: MPI_COMM_FESOM, mype (previously in g_parsup) + USE fesom_module, ONLY : fesom_init + USE g_config, only: dt + USE g_clock, only: timenew, daynew, yearnew, month, day_in_month + USE nemogcmcoup_steps, ONLY : substeps + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype ! was added to ifs/nemo/ininemo.F90 to allow diagnostics based on the first tasks only + INTEGER, INTENT(IN) :: icomm + ! Initial date (e.g. 20170906), time, initial timestep and final time step + INTEGER, INTENT(OUT) :: inidate, initime, itini, itend + ! Length of the time step + REAL(wpIFS), INTENT(OUT) :: zstp + + ! inherited from interface to NEMO, not used here: + ! Coupling to waves only + LOGICAL, INTENT(IN) :: lwaveonly + ! Logfile unit (used if >=0) + INTEGER :: iatmunit + ! Write to this unit + LOGICAL :: lwrite + ! 
FESOM might perform substeps + INTEGER :: itend_fesom + INTEGER :: i + NAMELIST/namfesomstep/substeps + + ! overwritten from value namelist + substeps=2 + OPEN(9,file='namfesomstep.in') + READ(9,namfesomstep) + CLOSE(9) + + fesom%partit%MPI_COMM_FESOM=icomm + + itini = 1 + CALL fesom_init(itend_fesom) !also sets mype and npes + itend=itend_fesom/substeps + if(fesom%mype==0) then + WRITE(0,*)'!======================================' + WRITE(0,*)'! FESOM is initialized from within IFS.' + WRITE(0,*)'! get MPI_COMM_FESOM. =================' + WRITE(0,*)'! main_initialize done. ===============' + endif + + ! Set more information for the caller + + ! initial date and time (time is not used) + inidate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 + initime = 0 + if(fesom%mype==0) then + WRITE(0,*)'! FESOM initial date is ', inidate ,' ======' + WRITE(0,*)'! FESOM substeps are ', substeps ,' ======' + endif + + ! fesom timestep (as seen by IFS) + zstp = REAL(substeps,wpIFS)*dt + if(fesom%mype==0) then + WRITE(0,*)'! FESOM timestep as seen by IFS is ', real(zstp,4), 'sec (',substeps,'xdt)' + WRITE(0,*)'!======================================' + endif + +END SUBROUTINE nemogcmcoup_init + + +SUBROUTINE nemogcmcoup_coupinit( mypeIN, npesIN, icomm, & + & npoints, nlocmsk, ngloind ) + + ! FESOM modules + USE fesom_main_storage_module, only: fesom => f ! only: mype, npes, myDim_nod2D, eDim_nod2D, myDim_elem2D, eDim_elem2D, eXDim_elem2D, & + ! myDim_edge2D, eDim_edge2D, myList_nod2D, myList_elem2D + + ! Initialize single executable coupling + USE parinter + USE scripremap + USE interinfo + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mypeIN,npesIN,icomm + ! Gaussian grid information + ! Number of points + INTEGER, INTENT(IN) :: npoints + ! Integer mask and global indices + INTEGER, DIMENSION(npoints), INTENT(IN) :: nlocmsk, ngloind + INTEGER :: iunit = 0 + + ! 
Local variables + !type(t_mesh), target :: mesh + integer , pointer :: nod2D + integer , pointer :: elem2D + integer, pointer :: myDim_nod2D, eDim_nod2D + integer, dimension(:), pointer :: myList_nod2D + integer, pointer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D + integer, dimension(:), pointer :: myList_elem2D + + ! Namelist containing the file names of the weights + CHARACTER(len=256) :: cdfile_gauss_to_T, cdfile_gauss_to_UV, & + & cdfile_T_to_gauss, cdfile_UV_to_gauss + CHARACTER(len=256) :: cdpathdist + LOGICAL :: lwritedist, lreaddist + LOGICAL :: lcommout + CHARACTER(len=128) :: commoutprefix + NAMELIST/namfesomcoup/cdfile_gauss_to_T,& + & cdfile_gauss_to_UV,& + & cdfile_T_to_gauss,& + & cdfile_UV_to_gauss,& + & cdpathdist, & + & lreaddist, & + & lwritedist, & + & lcommout, & + & commoutprefix,& + & lparbcast + + ! Global number of gaussian gridpoints + INTEGER :: nglopoints + ! Ocean grids accessed with NEMO modules + INTEGER :: noglopoints,nopoints + INTEGER, ALLOCATABLE, DIMENSION(:) :: omask,ogloind + ! SCRIP remapping data structures. + TYPE(scripremaptype) :: remap_gauss_to_T, remap_T_to_gauss, & + & remap_gauss_to_UV, remap_UV_to_gauss + ! Misc variables + INTEGER :: i,j,k,ierr + LOGICAL :: lexists + + ! associate the mesh, only what is needed here + ! #include "associate_mesh.h" + nod2D => fesom%mesh%nod2D + elem2D => fesom%mesh%elem2D + myDim_nod2D => fesom%partit%myDim_nod2D + eDim_nod2D => fesom%partit%eDim_nod2D + myList_nod2D(1:myDim_nod2D+eDim_nod2D) => fesom%partit%myList_nod2D + myDim_elem2D => fesom%partit%myDim_elem2D + eDim_elem2D => fesom%partit%eDim_elem2D + eXDim_elem2D => fesom%partit%eXDim_elem2D + myList_elem2D(1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => fesom%partit%myList_elem2D + + ! here FESOM knows about the (total number of) MPI tasks + + if(fesom%mype==0) then + write(*,*) 'MPI has been initialized in the atmospheric model' + write(*, *) 'Running on ', fesom%npes, ' PEs' + end if + + ! 
Read namelists + + cdfile_gauss_to_T = 'gausstoT.nc' + cdfile_gauss_to_UV = 'gausstoUV.nc' + cdfile_T_to_gauss = 'Ttogauss.nc' + cdfile_UV_to_gauss = 'UVtogauss.nc' + lcommout = .FALSE. + commoutprefix = 'parinter_comm' + cdpathdist = './' + lreaddist = .FALSE. + lwritedist = .FALSE. + + OPEN(9,file='namfesomcoup.in') + READ(9,namfesomcoup) + CLOSE(9) + + ! Global number of Gaussian gridpoints + + CALL mpi_allreduce( npoints, nglopoints, 1, & + & mpi_integer, mpi_sum, icomm, ierr) + + + if(fesom%mype==0) then + WRITE(0,*)'!======================================' + WRITE(0,*)'! SCALARS =============================' + + WRITE(0,*)'Update FESOM global scalar points' + endif + + noglopoints=nod2D + nopoints=myDim_nod2d + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + omask(:)= 1 ! all points are ocean points + ogloind(1:myDim_nod2d)= myList_nod2D(1:myDim_nod2d) ! global index for local point number + + ! Could be helpful later: + ! Replace global numbering with a local one + ! tmp(1:nod2d)=0 + ! DO n=1, myDim_nod2D+eDim_nod2D + ! tmp(myList_nod2D(n))=n + + ! Read the interpolation weights and setup the parallel interpolation + ! from atmosphere Gaussian grid to ocean T-grid + + IF (lreaddist) THEN + CALL parinter_read( fesom%mype, fesom%npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_T,remap_gauss_to_T,& + & fesom%mype,fesom%npes,icomm,.TRUE.) 
+ ELSE + CALL scripremap_read(cdfile_gauss_to_T,remap_gauss_to_T) + ENDIF + CALL parinter_init( fesom%mype, fesom%npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_T, gausstoT, lcommout, TRIM(commoutprefix)//'_gtoT', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_T) + IF (lwritedist) THEN + CALL parinter_write( fesom%mype, fesom%npes, nglopoints, noglopoints, gausstoT, & + & cdpathdist,'ifs_to_fesom_gridT') + ENDIF + ENDIF + + ! From ocean T-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( fesom%mype, fesom%npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_T_to_gauss,remap_T_to_gauss,& + & fesom%mype,fesom%npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_T_to_gauss,remap_T_to_gauss) + ENDIF + + CALL parinter_init( fesom%mype, fesom%npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_T_to_gauss, Ttogauss, lcommout, TRIM(commoutprefix)//'_Ttog', & + & iunit ) + CALL scripremap_dealloc(remap_T_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( fesom%mype, fesom%npes, noglopoints, nglopoints, Ttogauss, & + & cdpathdist,'fesom_gridT_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + + + if(fesom%mype==0) then + WRITE(0,*)'!======================================' + WRITE(0,*)'! VECTORS =============================' + + WRITE(0,*)'Update FESOM global vector points' + endif + noglopoints=elem2D + nopoints=myDim_elem2D + + ! Ocean mask and global indicies + + ALLOCATE(omask(MAX(nopoints,1)),ogloind(MAX(nopoints,1))) + + omask(:)= 1 ! all elements are in the ocean + ogloind(1:myDim_elem2D) = myList_elem2D(1:myDim_elem2D) ! global index for local element number + + ! Read the interpolation weights and setup the parallel interpolation + ! 
from atmosphere Gaussian grid to ocean UV-grid + + IF (lreaddist) THEN + CALL parinter_read( fesom%mype, fesom%npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_gauss_to_UV,remap_gauss_to_UV,& + & fesom%mype,fesom%npes,icomm,.TRUE.) + ELSE + CALL scripremap_read(cdfile_gauss_to_UV,remap_gauss_to_UV) + ENDIF + CALL parinter_init( fesom%mype, fesom%npes, icomm, & + & npoints, nglopoints, nlocmsk, ngloind, & + & nopoints, noglopoints, omask, ogloind, & + & remap_gauss_to_UV, gausstoUV, lcommout, TRIM(commoutprefix)//'_gtoUV', & + & iunit ) + CALL scripremap_dealloc(remap_gauss_to_UV) + IF (lwritedist) THEN + CALL parinter_write( fesom%mype, fesom%npes, nglopoints, noglopoints, gausstoUV, & + & cdpathdist,'ifs_to_fesom_gridUV') + ENDIF + ENDIF + + ! From ocean UV-grid to atmosphere Gaussian grid + + IF (lreaddist) THEN + CALL parinter_read( fesom%mype, fesom%npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs',lexists) + ENDIF + IF ((.NOT.lreaddist).OR.(.NOT.lexists)) THEN + IF (lparbcast) THEN + CALL scripremap_read_sgl(cdfile_UV_to_gauss,remap_UV_to_gauss,& + & fesom%mype,fesom%npes,icomm,.TRUE.) 
+ ELSE + CALL scripremap_read(cdfile_UV_to_gauss,remap_UV_to_gauss) + ENDIF + + CALL parinter_init( fesom%mype, fesom%npes, icomm, & + & nopoints, noglopoints, omask, ogloind, & + & npoints, nglopoints, nlocmsk, ngloind, & + & remap_UV_to_gauss, UVtogauss, lcommout, TRIM(commoutprefix)//'_UVtog', & + & iunit ) + CALL scripremap_dealloc(remap_UV_to_gauss) + IF (lwritedist) THEN + CALL parinter_write( fesom%mype, fesom%npes, noglopoints, nglopoints, UVtogauss, & + & cdpathdist,'fesom_gridUV_to_ifs') + ENDIF + ENDIF + + DEALLOCATE(omask,ogloind) + +END SUBROUTINE nemogcmcoup_coupinit + + +SUBROUTINE nemogcmcoup_lim2_get( mype, npes, icomm, & + & nopoints, pgsst, pgist, pgalb, & + & pgifr, pghic, pghsn, pgucur, pgvcur, & + & pgistl, licelvls ) + + ! Interpolate sst, ice; surf T; albedo; concentration; thickness, + ! snow thickness and currents from the FESOM grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind ! in ifs_modules.F90 + USE fesom_main_storage_module, only: fesom => f + !USE o_ARRAYS, ONLY : UV ! tr_arr is now tracers, UV in dynamics derived type + !USE i_arrays, ONLY : m_ice, a_ice, m_snow + !USE i_therm_param, ONLY : tmelt + USE g_rotate_grid, only: vector_r2g + USE parinter + USE scripremap + USE interinfo + + IMPLICIT NONE + + ! Arguments + REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgist, pgalb, pgifr, pghic, pghsn, pgucur, pgvcur + REAL(wpIFS), DIMENSION(nopoints,3) :: pgistl + LOGICAL :: licelvls + + !type(t_mesh), target :: mesh + real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D + integer, dimension(:,:) , pointer :: elem2D_nodes + integer, pointer :: myDim_nod2D, eDim_nod2D + integer, pointer :: myDim_elem2D, eDim_elem2D, eXDim_elem2D + real(kind=wpIFS), dimension(:), pointer :: a_ice, m_ice, m_snow, ice_temp, ice_alb + real(kind=wpIFS) , pointer :: tmelt + + ! 
Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + REAL(wpIFS), DIMENSION(fesom%partit%myDim_nod2D) :: zsend + REAL(wpIFS), DIMENSION(fesom%partit%myDim_elem2D) :: zsendU, zsendV + INTEGER :: elnodes(3) + REAL(wpIFS) :: rlon, rlat + + ! Loop variables + INTEGER :: n, elem, ierr + + !#include "associate_mesh.h" + ! associate what is needed only + myDim_nod2D => fesom%partit%myDim_nod2D + eDim_nod2D => fesom%partit%eDim_nod2D + + myDim_elem2D => fesom%partit%myDim_elem2D + eDim_elem2D => fesom%partit%eDim_elem2D + eXDim_elem2D => fesom%partit%eXDim_elem2D + + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D(:,:) + elem2D_nodes(1:3, 1:myDim_elem2D+eDim_elem2D+eXDim_elem2D) => fesom%mesh%elem2D_nodes(:,:) + a_ice => fesom%ice%data(1)%values(:) + m_ice => fesom%ice%data(2)%values(:) + m_snow => fesom%ice%data(3)%values(:) + ice_temp => fesom%ice%data(4)%values(:) + ice_alb => fesom%ice%atmcoupl%ice_alb(:) + tmelt => fesom%ice%thermo%tmelt ! scalar const. + + ! =================================================================== ! + ! Pack SST data and convert to K. 'pgsst' is on Gauss grid. + do n=1,myDim_nod2D + zsend(n)=fesom%tracers%data(1)%values(1, n) +tmelt ! sea surface temperature [K], + ! (1=surface, n=node, data(1/2)=T/S) + enddo + + ! Interpolate SST + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgsst ) + + + ! =================================================================== ! + ! Pack ice fraction data [0..1] and interpolate: 'pgifr' on Gauss. + ! zsend(:)=a_ice(:) + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, a_ice, & + & nopoints, pgifr ) + + + ! =================================================================== ! + ! Pack ice temperature data (already in K) + zsend(:)=ice_temp + + ! Interpolate ice surface temperature: 'pgist' on Gaussian grid. 
+ CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgist ) + + + ! =================================================================== ! + ! Pack ice albedo data and interpolate: 'pgalb' on Gaussian grid. + zsend(:)=ice_alb + + ! Interpolate ice albedo + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pgalb ) + + + ! =================================================================== ! + ! Pack ice thickness data and interpolate: 'pghic' on Gaussian grid. + zsend(:)=m_ice(:)/max(a_ice(:),0.01) ! ice thickness (mean over ice) + + ! Interpolation of average ice thickness + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pghic ) + + + ! =================================================================== ! + ! Pack snow thickness data and interpolate: 'pghsn' on Gaussian grid. + zsend(:)=m_snow(:)/max(a_ice(:),0.01) ! snow thickness (mean over ice) + + ! Interpolation of snow thickness + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & myDim_nod2D, zsend, & + & nopoints, pghsn ) + + + ! =================================================================== ! + ! Surface currents need to be rotated to geographical grid + + ! Pack u(v) surface currents + zsendU(:)=fesom%dynamics%UV(1,1,1:myDim_elem2D) + zsendV(:)=fesom%dynamics%UV(2,1,1:myDim_elem2D) !UV includes eDim, leave those away here + + do elem=1, myDim_elem2D + + ! compute element midpoints + elnodes=elem2D_nodes(:,elem) + rlon=sum(coord_nod2D(1,elnodes))/3.0_wpIFS + rlat=sum(coord_nod2D(2,elnodes))/3.0_wpIFS + + ! Rotate vectors to geographical coordinates (r2g) + call vector_r2g(zsendU(elem), zsendV(elem), rlon, rlat, 0) ! 0-flag for rot. coord + + end do + +#ifdef FESOM_TODO + + ! We need to sort out the non-unique global index before we + ! can couple currents + + ! Interpolate: 'pgucur' and 'pgvcur' on Gaussian grid. 
+ CALL parinter_fld( mype, npes, icomm, UVtogauss, & + & myDim_elem2D, zsendU, & + & nopoints, pgucur ) + + CALL parinter_fld( mype, npes, icomm, UVtogauss, & + & myDim_elem2D, zsendV, & + & nopoints, pgvcur ) + +#else + + pgucur(:) = 0.0 + pgvcur(:) = 0.0 + +#endif + +#ifndef FESOM_TODO + + if(mype==0) then + WRITE(0,*)'Everything implemented except ice level temperatures (licelvls).' + endif + +#else + + ! Ice level temperatures + + IF (licelvls) THEN + +#if defined key_lim2 + + DO jl = 1, 3 + + ! Pack ice temperatures data at level jl(already in K) + + jk = 0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = jk + 1 + zsend(jk) = tbif (ji,jj,jl) + ENDDO + ENDDO + + ! Interpolate ice temperature at level jl + + CALL parinter_fld( mype, npes, icomm, Ttogauss, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zsend, & + & nopoints, pgistl(:,jl) ) + + ENDDO + +#else + WRITE(0,*)'licelvls needs to be sorted for LIM3' + CALL abort +#endif + + ENDIF + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_get') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_get',1,zhook_handle) + +#endif + +END SUBROUTINE nemogcmcoup_lim2_get + + +SUBROUTINE nemogcmcoup_lim2_update( mype, npes, icomm, & + & npoints, & + & taux_oce, tauy_oce, taux_ice, tauy_ice, & + & qs___oce, qs___ice, qns__oce, qns__ice, dqdt_ice, & + & evap_tot, evap_ice, prcp_liq, prcp_sol, & + & runoffIN, ocerunoff, tcc, lcc, tice_atm, & + & kt, ldebug, loceicemix, lqnsicefilt ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind !in ifs_modules.F90 + USE fesom_main_storage_module, only: fesom => f + !USE o_PARAM, ONLY : WP, use wpIFS from par_kind (IFS) + USE g_rotate_grid, only: vector_r2g, vector_g2r + USE g_forcing_arrays, only: shortwave, prec_rain, prec_snow, runoff, & + & evap_no_ifrac, sublimation !'longwave' only stand-alone, 'evaporation' filled later +! USE i_ARRAYS, only: stress_atmice_x, stress_atmice_y, oce_heat_flux, ice_heat_flux +! 
USE i_ARRAYS, only: oce_heat_flux, ice_heat_flux + USE o_ARRAYS, only: stress_atmoce_x, stress_atmoce_y + USE g_comm_auto ! exchange_nod does the halo exchange + + ! all needed? + USE parinter + USE scripremap + USE interinfo + + IMPLICIT NONE + + ! =================================================================== ! + ! Arguments ========================================================= ! + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & + & taux_oce, tauy_oce, taux_ice, tauy_ice, & + & qs___oce, qs___ice, qns__oce, qns__ice, & + & dqdt_ice, evap_tot, evap_ice, prcp_liq, prcp_sol, & + & runoffIN, ocerunoff, tcc, lcc, tice_atm + + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + ! QS/QNS mixed switch + LOGICAL, INTENT(IN) :: loceicemix + ! QNS ice filter switch (requires tice_atm to be sent) + LOGICAL, INTENT(IN) :: lqnsicefilt + + !type(t_mesh), target :: mesh + + ! Local variables + INTEGER :: n + integer, pointer :: myDim_nod2D, eDim_nod2D + REAL(wpIFS), parameter :: rhofwt = 1000. ! density of freshwater + + + ! Packed receive buffer + REAL(wpIFS), DIMENSION(fesom%partit%myDim_nod2D) :: zrecv + REAL(wpIFS), DIMENSION(fesom%partit%myDim_elem2D):: zrecvU, zrecvV + + + !#include "associate_mesh.h" + ! 
associate only the necessary things + real(kind=wpIFS), dimension(:,:), pointer :: coord_nod2D + real(kind=wpIFS), dimension(:) , pointer :: stress_atmice_x, stress_atmice_y + real(kind=wpIFS), dimension(:) , pointer :: oce_heat_flux, ice_heat_flux + myDim_nod2D => fesom%partit%myDim_nod2D + eDim_nod2D => fesom%partit%eDim_nod2D + coord_nod2D(1:2,1:myDim_nod2D+eDim_nod2D) => fesom%mesh%coord_nod2D(:,:) + stress_atmice_x => fesom%ice%stress_atmice_x + stress_atmice_y => fesom%ice%stress_atmice_y + oce_heat_flux => fesom%ice%atmcoupl%oce_flx_h(:) + ice_heat_flux => fesom%ice%atmcoupl%ice_flx_h(:) + + ! =================================================================== ! + ! Sort out incoming arrays from the IFS and put them on the ocean grid + + ! TODO + shortwave(:)=0. ! Done, updated below. What to do with shortwave over ice?? + !longwave(:)=0. ! Done. Only used in stand-alone mode. + prec_rain(:)=0. ! Done, updated below. + prec_snow(:)=0. ! Done, updated below. + evap_no_ifrac=0. ! Done, updated below. This is evap over ocean, does this correspond to evap_tot? + sublimation=0. ! Done, updated below. + ! + ice_heat_flux=0. ! Done. This is qns__ice currently. Is this the non-solar heat flux? ! non solar heat fluxes below ! (qns) + oce_heat_flux=0. ! Done. This is qns__oce currently. Is this the non-solar heat flux? + ! + runoff(:)=0. ! not used apparently. What is runoffIN, ocerunoff? + !evaporation(:)=0. + !ice_thermo_cpl.F90: !---- total evaporation (needed in oce_salt_balance.F90) + !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice + stress_atmice_x=0. ! Done, taux_ice + stress_atmice_y=0. ! Done, tauy_ice + stress_atmoce_x=0. ! Done, taux_oce + stress_atmoce_y=0. ! Done, tauy_oce + + + ! =================================================================== ! + !1. Interpolate ocean solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & + & myDim_nod2D, zrecv ) + + ! 
Unpack ocean solar radiation, without halo + shortwave(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(shortwave,fesom%partit) + + + ! =================================================================== ! + !2. Interpolate ice solar radiation to T grid + ! DO NOTHING + + + ! =================================================================== ! + !3. Interpolate ocean non-solar radiation to T grid (is this non-solar heat flux?) + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & + & myDim_nod2D, zrecv ) + + ! Unpack ocean non-solar, without halo + oce_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(oce_heat_flux,fesom%partit) + + + ! =================================================================== ! + !4. Interpolate non-solar radiation over ice to T grid (is this non-solar heat flux?) + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & + & myDim_nod2D, zrecv ) + + ! Unpack ice non-solar + ice_heat_flux(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + + ! Do the halo exchange + call exchange_nod(ice_heat_flux,fesom%partit) + + + ! =================================================================== ! + !5. D(q)/dT to T grid + ! DO NOTHING + + + ! =================================================================== ! + !6. Interpolate total evaporation to T grid + ! =================================================================== ! + !ice_thermo_cpl.F90: total evaporation (needed in oce_salt_balance.F90) + !ice_thermo_cpl.F90: evaporation = evap_no_ifrac*(1.-a_ice) + sublimation*a_ice + ! =================================================================== ! + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & + & myDim_nod2D, zrecv ) + + ! Unpack total evaporation, without halo + evap_no_ifrac(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign + + ! 
Do the halo exchange + call exchange_nod(evap_no_ifrac,fesom%partit) + + !7. Interpolate sublimation (evaporation over ice) to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack sublimation (evaporation over ice), without halo + sublimation(1:myDim_nod2D)=-zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s; change sign + + ! Do the halo exchange + call exchange_nod(sublimation,fesom%partit) + ! =================================================================== ! + ! =================================================================== ! + + + ! =================================================================== ! + !8. Interpolate liquid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & + & myDim_nod2D, zrecv ) + + ! Unpack liquid precipitation, without halo + prec_rain(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + + ! Do the halo exchange + call exchange_nod(prec_rain,fesom%partit) + + + ! =================================================================== ! + !9. Interpolate solid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & + & myDim_nod2D, zrecv ) + + ! Unpack solid precipitation, without halo + prec_snow(1:myDim_nod2D)=zrecv(1:myDim_nod2D)/rhofwt ! kg m^(-2) s^(-1) -> m/s + + ! Do the halo exchange + call exchange_nod(prec_snow,fesom%partit) + + + ! =================================================================== ! + !10. Interpolate runoff to T grid + ! + !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & + ! & myDim_nod2D, zrecv ) + ! + ! Unpack runoff, without halo + !runoff(1:myDim_nod2D)=zrecv(1:myDim_nod2D) !conversion?? + ! + ! Do the halo exchange + !call exchange_nod(runoff,fesom%partit) + ! + !11. Interpolate ocean runoff to T grid + ! + !CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & + ! & myDim_nod2D, zrecv ) + ! + ! 
Unpack ocean runoff + ! ?? + + !12. Interpolate total cloud fractions to T grid (tcc) + ! + !13. Interpolate low cloud fractions to T grid (lcc) + + + ! =================================================================== ! + ! STRESSES + + ! OVER OCEAN: + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_oce, & + & myDim_nod2D, zrecv ) + + ! Unpack x stress atm->oce, without halo; then do halo exchange + stress_atmoce_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmoce_x,fesom%partit) + + ! + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_oce, & + & myDim_nod2D, zrecv ) + + ! Unpack y stress atm->oce, without halo; then do halo exchange + stress_atmoce_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmoce_y,fesom%partit) + + ! =================================================================== ! + ! OVER ICE: + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, taux_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack x stress atm->ice, without halo; then do halo exchange + stress_atmice_x(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmice_x,fesom%partit) + + ! + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tauy_ice, & + & myDim_nod2D, zrecv ) + + ! Unpack y stress atm->ice, without halo; then do halo exchange + stress_atmice_y(1:myDim_nod2D)=zrecv(1:myDim_nod2D) + call exchange_nod(stress_atmice_y,fesom%partit) + + + ! =================================================================== ! + ! ROTATE VECTORS FROM GEOGRAPHIC TO FESOMS ROTATED GRID + + !if ((do_rotate_oce_wind .AND. do_rotate_ice_wind) .AND. rotated_grid) then + do n=1, myDim_nod2D+eDim_nod2D + call vector_g2r(stress_atmoce_x(n), stress_atmoce_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) !0-flag for rot. coord. + call vector_g2r(stress_atmice_x(n), stress_atmice_y(n), coord_nod2D(1, n), coord_nod2D(2, n), 0) + end do + !do_rotate_oce_wind=.false. + !do_rotate_ice_wind=.false. 
+ !end if + + +#ifdef FESOM_TODO + + ! Packed receive buffer + REAL(wpIFS), DIMENSION((nlei-nldi+1)*(nlej-nldj+1)) :: zrecv + ! Unpacked fields on ORCA grids + REAL(wpIFS), DIMENSION(jpi,jpj) :: zqs___oce, zqs___ice, zqns__oce, zqns__ice + REAL(wpIFS), DIMENSION(jpi,jpj) :: zdqdt_ice, zevap_tot, zevap_ice, zprcp_liq, zprcp_sol + REAL(wpIFS), DIMENSION(jpi,jpj) :: zrunoff, zocerunoff + REAL(wpIFS), DIMENSION(jpi,jpj) :: ztmp, zicefr + ! Arrays for rotation + REAL(wpIFS), DIMENSION(jpi,jpj) :: zuu,zvu,zuv,zvv,zutau,zvtau + ! Lead fraction for both LIM2/LIM3 + REAL(wpIFS), DIMENSION(jpi,jpj) :: zfrld + ! Mask for masking for I grid + REAL(wpIFS) :: zmsksum + ! For summing up LIM3 contributions to ice temperature + REAL(wpIFS) :: zval,zweig + + ! Loop variables + INTEGER :: ji,jj,jk,jl + ! netCDF debugging output variables + CHARACTER(len=128) :: cdoutfile + INTEGER :: inum + REAL(wpIFS) :: zhook_handle ! Dr Hook handle + + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',0,zhook_handle) + IF(nn_timing == 1) CALL timing_start('nemogcmcoup_lim2_update') + + ! Allocate the storage data + + IF (.NOT.lallociceflx) THEN + ALLOCATE( & + & zsqns_tot(jpi,jpj), & + & zsqns_ice(jpi,jpj), & + & zsqsr_tot(jpi,jpj), & + & zsqsr_ice(jpi,jpj), & + & zsemp_tot(jpi,jpj), & + & zsemp_ice(jpi,jpj), & + & zsevap_ice(jpi,jpj), & + & zsdqdns_ice(jpi,jpj), & + & zssprecip(jpi,jpj), & + & zstprecip(jpi,jpj), & + & zstcc(jpi,jpj), & + & zslcc(jpi,jpj), & + & zsatmist(jpi,jpj), & + & zsqns_ice_add(jpi,jpj)& + & ) + lallociceflx = .TRUE. + ENDIF + IF (.NOT.lallocstress) THEN + ALLOCATE( & + & zsutau(jpi,jpj), & + & zsvtau(jpi,jpj), & + & zsutau_ice(jpi,jpj), & + & zsvtau_ice(jpi,jpj) & + & ) + lallocstress = .TRUE. + ENDIF + + ! Sort out incoming arrays from the IFS and put them on the ocean grid + + !1. Interpolate ocean solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! 
Unpack ocean solar radiation + + zqs___oce(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqs___oce(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !2. Interpolate ice solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qs___ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice solar radiation + + zqs___ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqs___ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !3. Interpolate ocean non-solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean non-solar radiation + + zqns__oce(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqns__oce(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !4. Interpolate ice non-solar radiation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, qns__ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice non-solar radiation + + zqns__ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zqns__ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !5. Interpolate D(q)/dT to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, dqdt_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack D(q)/D(T) + + zdqdt_ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zdqdt_ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !6. Interpolate total evaporation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_tot, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! 
Unpack total evaporation + + zevap_tot(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zevap_tot(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !7. Interpolate evaporation over ice to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, evap_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack evaporation over ice + + zevap_ice(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zevap_ice(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !8. Interpolate liquid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_liq, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack liquid precipitation + + zprcp_liq(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zprcp_liq(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !9. Interpolate solid precipitation to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, prcp_sol, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack precipitation over ice + + zprcp_sol(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zprcp_sol(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !10. Interpolate runoff to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, runoff, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack runoff + + zrunoff(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zrunoff(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !11. Interpolate ocean runoff to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, ocerunoff, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! 
Unpack ocean runoff + + zocerunoff(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zocerunoff(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !12. Interpolate total cloud fractions to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tcc, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zstcc(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zstcc(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + !13. Interpolate low cloud fractions to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, lcc, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ocean runoff + + zslcc(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zslcc(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! get sea ice fraction and lead fraction + +#if defined key_lim2 + zfrld(:,:) = frld(:,:) + zicefr(:,:) = 1 - zfrld(:,:) +#else + zicefr(:,:) = 0.0_wpIFS + DO jl = 1, jpl + zicefr(:,:) = zicefr(:,:) + a_i(:,:,jl) + ENDDO + zfrld(:,:) = 1 - zicefr(:,:) +#endif + + zsemp_tot(:,:) = zevap_tot(:,:) - zprcp_liq(:,:) - zprcp_sol(:,:) + zstprecip(:,:) = zprcp_liq(:,:) + zprcp_sol(:,:) + ! More consistent with NEMO, but does changes the results, so + ! we don't do it for now. + ! zsemp_tot(:,:) = zevap_tot(:,:) - zstprecip(:,:) + zsemp_ice(:,:) = zevap_ice(:,:) - zprcp_sol(:,:) + zssprecip(:,:) = - zsemp_ice(:,:) + zsemp_tot(:,:) = zsemp_tot(:,:) - zrunoff(:,:) + zsemp_tot(:,:) = zsemp_tot(:,:) - zocerunoff(:,:) + zsevap_ice(:,:) = zevap_ice(:,:) + + ! non solar heat fluxes ! (qns) + IF (loceicemix) THEN + zsqns_tot(:,:) = zqns__oce(:,:) + ELSE + zsqns_tot(:,:) = zfrld(:,:) * zqns__oce(:,:) + zicefr(:,:) * zqns__ice(:,:) + ENDIF + zsqns_ice(:,:) = zqns__ice(:,:) + ztmp(:,:) = zfrld(:,:) * zprcp_sol(:,:) * lfus ! add the latent heat of solid precip. 
melting + + zsqns_tot(:,:) = zsqns_tot(:,:) - ztmp(:,:) ! over free ocean + ! solar heat fluxes ! (qsr) + + IF (loceicemix) THEN + zsqsr_tot(:,:) = zqs___oce(:,:) + ELSE + zsqsr_tot(:,:) = zfrld(:,:) * zqs___oce(:,:) + zicefr(:,:) * zqs___ice(:,:) + ENDIF + zsqsr_ice(:,:) = zqs___ice(:,:) + + IF( ln_dm2dc ) THEN ! modify qsr to include the diurnal cycle + zsqsr_tot(:,:) = sbc_dcy( zsqsr_tot(:,:) ) + zsqsr_ice(:,:) = sbc_dcy( zsqsr_ice(:,:) ) + ENDIF + + zsdqdns_ice(:,:) = zdqdt_ice(:,:) + + ! Apply lateral boundary condition + + CALL lbc_lnk(zsqns_tot, 'T', 1.0) + CALL lbc_lnk(zsqns_ice, 'T', 1.0) + CALL lbc_lnk(zsqsr_tot, 'T', 1.0) + CALL lbc_lnk(zsqsr_ice, 'T', 1.0) + CALL lbc_lnk(zsemp_tot, 'T', 1.0) + CALL lbc_lnk(zsemp_ice, 'T', 1.0) + CALL lbc_lnk(zsdqdns_ice, 'T', 1.0) + CALL lbc_lnk(zssprecip, 'T', 1.0) + CALL lbc_lnk(zstprecip, 'T', 1.0) + CALL lbc_lnk(zstcc, 'T', 1.0) + CALL lbc_lnk(zslcc, 'T', 1.0) + + ! Interpolate atmospheric ice temperature to T grid + + CALL parinter_fld( mype, npes, icomm, gausstoT, npoints, tice_atm, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack atmospheric ice temperature + + zsatmist(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zsatmist(ji,jj) = zrecv(jk) + ENDDO + ENDDO + CALL lbc_lnk(zsatmist, 'T', 1.0) + + zsqns_ice_add(:,:) = 0.0_wpIFS + + ! Use the dqns_ice filter + + IF (lqnsicefilt) THEN + + ! 
Add filtr to qns_ice + +#if defined key_lim2 + ztmp(:,:) = tn_ice(:,:,1) +#else + DO jj = nldj, nlej + DO ji = nldi, nlei + zval=0.0 + zweig=0.0 + DO jl = 1, jpl + zval = zval + tn_ice(ji,jj,jl) * a_i(ji,jj,jl) + zweig = zweig + a_i(ji,jj,jl) + ENDDO + IF ( zweig > 0.0 ) THEN + ztmp(ji,jj) = zval /zweig + ELSE + ztmp(ji,jj) = rt0 + ENDIF + ENDDO + ENDDO + CALL lbc_lnk(ztmp, 'T', 1.0) +#endif + + WHERE ( zicefr(:,:) > .001_wpIFS ) + zsqns_ice_add(:,:) = zsdqdns_ice(:,:) * ( ztmp(:,:) - zsatmist(:,:) ) + END WHERE + + zsqns_ice(:,:) = zsqns_ice(:,:) + zsqns_ice_add(:,:) + + ENDIF + + ! Interpolate u-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack u stress on U grid + + zuu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate v-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack v stress on U grid + + zvu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate u-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack u stress on V grid + + zuv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate v-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_oce, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! 
Unpack v stress on V grid + + zvv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Rotate stresses from en to ij and put u,v stresses on U,V grids + + CALL repcmo( zuu, zvu, zuv, zvv, zsutau, zsvtau ) + + ! Apply lateral boundary condition on u,v stresses on the U,V grids + + CALL lbc_lnk( zsutau, 'U', -1.0 ) + CALL lbc_lnk( zsvtau, 'V', -1.0 ) + + ! Interpolate ice u-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints,taux_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice u stress on U grid + + zuu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice v-stress to U grid + + CALL parinter_fld( mype, npes, icomm, gausstoU, npoints, tauy_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice v stress on U grid + + zvu(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvu(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice u-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints,taux_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice u stress on V grid + + zuv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zuv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! Interpolate ice v-stress to V grid + + CALL parinter_fld( mype, npes, icomm, gausstoV, npoints, tauy_ice, & + & ( nlei - nldi + 1 ) * ( nlej - nldj + 1 ), zrecv ) + + ! Unpack ice v stress on V grid + + zvv(:,:) = 0.0 + DO jj = nldj, nlej + DO ji = nldi, nlei + jk = ( jj - nldj ) * ( nlei - nldi + 1 ) + ( ji - nldi + 1 ) + zvv(ji,jj) = zrecv(jk) + ENDDO + ENDDO + + ! 
Rotate stresses from en to ij and put u,v stresses on U,V grids + + CALL repcmo( zuu, zvu, zuv, zvv, zutau, zvtau ) + + ! Apply lateral boundary condition on u,v stresses on the U,V grids + + CALL lbc_lnk( zutau, 'U', -1.0 ) + CALL lbc_lnk( zvtau, 'V', -1.0 ) + +#if defined key_lim2_vp + + ! Convert to I grid for LIM2 for key_lim_vp + DO jj = 2, jpjm1 ! (U,V) ==> I + DO ji = 2, jpim1 ! NO vector opt. + zmsksum = umask(ji-1,jj,1) + umask(ji-1,jj-1,1) + zsutau_ice(ji,jj) = ( umask(ji-1,jj,1) * zutau(ji-1,jj) + & + & umask(ji-1,jj-1,1) * zutau(ji-1,jj-1) ) + IF ( zmsksum > 0.0 ) THEN + zsutau_ice(ji,jj) = zsutau_ice(ji,jj) / zmsksum + ENDIF + zmsksum = vmask(ji,jj-1,1) + vmask(ji-1,jj-1,1) + zsvtau_ice(ji,jj) = ( vmask(ji,jj-1,1) * zvtau(ji,jj-1) + & + & vmask(ji-1,jj-1,1) * zvtau(ji-1,jj-1) ) + IF ( zmsksum > 0.0 ) THEN + zsvtau_ice(ji,jj) = zsvtau_ice(ji,jj) / zmsksum + ENDIF + END DO + END DO + +#else + + zsutau_ice(:,:) = zutau(:,:) + zsvtau_ice(:,:) = zvtau(:,:) + +#endif + + CALL lbc_lnk( zsutau_ice, 'I', -1.0 ) + CALL lbc_lnk( zsvtau_ice, 'I', -1.0 ) + + ! Optionally write files write the data on the ORCA grid via IOM. 
+ + IF (ldebug) THEN + WRITE(cdoutfile,'(A,I8.8)') 'zsutau_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsutau' , zsutau ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsvtau' , zsvtau ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsutau_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsutau_ice' , zsutau_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsvtau_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsvtau_ice' , zsvtau_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_tot' , zsqns_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_ice' , zsqns_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqsr_tot' , zsqsr_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqsr_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqsr_ice' , zsqsr_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsemp_tot_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsemp_tot' , zsemp_tot ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsemp_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, 
kt, inum, 'zsemp_ice' , zsemp_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsdqdns_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsdqdns_ice' , zsdqdns_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zssprecip_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zssprecip' , zssprecip ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zstprecip_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zstprecip' , zstprecip ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsevap_ice_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsevap_ice' , zsevap_ice ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zstcc_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zstcc' , zstcc ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zslcc_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zslcc' , zslcc ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsatmist_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsatmist' , zsatmist ) + CALL iom_close( inum ) + WRITE(cdoutfile,'(A,I8.8)') 'zsqns_ice_add_',kt + CALL iom_open( TRIM(cdoutfile), inum, ldwrt = .TRUE., kiolib = jprstlib) + CALL iom_rstput( kt, kt, inum, 'zsqns_ice_add' , zsqns_ice_add ) + CALL iom_close( inum ) + ENDIF + + IF(nn_timing == 1) CALL timing_stop('nemogcmcoup_lim2_update') + IF(lhook) CALL dr_hook('nemogcmcoup_lim2_update',1,zhook_handle) + +#else + + !FESOM part + !WRITE(0,*)'nemogcmcoup_lim2_update partially implemented. Proceeding...' 
+ !CALL par_ex + +#endif + +END SUBROUTINE nemogcmcoup_lim2_update + + +SUBROUTINE nemogcmcoup_step( istp, icdate, ictime ) + + USE g_clock, only: yearnew, month, day_in_month + USE fesom_main_storage_module, only: fesom => f ! mype + USE fesom_module, ONLY : fesom_runloop + USE nemogcmcoup_steps, ONLY : substeps + IMPLICIT NONE + + ! Arguments + + ! Time step + INTEGER, INTENT(IN) :: istp + + ! Data and time from NEMO + INTEGER, INTENT(OUT) :: icdate, ictime + + if(fesom%mype==0) then + WRITE(0,*)'! IFS at timestep ', istp, '. Do ', substeps , 'FESOM timesteps...' + endif + CALL fesom_runloop(substeps) + + ! Compute date and time at the end of the time step + + icdate = yearnew*10000 + month*100 + day_in_month ! e.g. 20170906 + ictime = 0 ! (time is not used) + + if(fesom%mype==0) then + WRITE(0,*)'! FESOM date at end of timestep is ', icdate ,' ======' + endif + +#ifdef FESOM_TODO + iye = ndastp / 10000 + imo = ndastp / 100 - iye * 100 + ida = MOD( ndastp, 100 ) + CALL greg2jul( 0, 0, 0, ida, imo, iye, zjul ) + zjul = zjul + ( nsec_day + 0.5_wpIFS * rdttra(1) ) / 86400.0_wpIFS + CALL jul2greg( iss, imm, ihh, ida, imo, iye, zjul ) + icdate = iye * 10000 + imo * 100 + ida + ictime = ihh * 10000 + imm * 100 + iss +#endif + +END SUBROUTINE nemogcmcoup_step + + +SUBROUTINE nemogcmcoup_final + + USE fesom_main_storage_module, only: fesom => f ! mype + USE fesom_module, ONLY : fesom_finalize + + ! Finalize the FESOM model + + IMPLICIT NONE + + if(fesom%mype==0) then + WRITE(*,*)'Finalization of FESOM from IFS.' + endif + CALL fesom_finalize + +END SUBROUTINE nemogcmcoup_final diff --git a/src/ifs_interface/ifs_modules.F90 b/src/ifs_interface/ifs_modules.F90 new file mode 100644 index 000000000..5e18ad10e --- /dev/null +++ b/src/ifs_interface/ifs_modules.F90 @@ -0,0 +1,1857 @@ +#define __MYFILE__ 'ifs_modules.F90' +#define key_mpp_mpi +! Set of modules needed by the interface to IFS. +! +! -Original code by Kristian Mogensen, ECMWF. 
+ +MODULE par_kind + IMPLICIT NONE + INTEGER, PUBLIC, PARAMETER :: & !: Floating point section + sp = SELECTED_REAL_KIND( 6, 37), & !: single precision (real 4) + dp = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + wpIFS = SELECTED_REAL_KIND(12,307), & !: double precision (real 8) + ik = SELECTED_INT_KIND(6) !: integer precision +END MODULE par_kind + +MODULE nctools + + ! Utility subroutines for netCDF access + ! Modified : MAB (nf90, handle_error, LINE&FILE) + ! Modifled : KSM (new shorter name) + + USE netcdf + + PUBLIC ldebug_netcdf, nchdlerr + LOGICAL :: ldebug_netcdf = .FALSE. ! Debug switch for netcdf + +CONTAINS + + SUBROUTINE nchdlerr(status,lineno,filename) + + ! Error handler for netCDF access + IMPLICIT NONE + + + INTEGER :: status ! netCDF return status + INTEGER :: lineno ! Line number (usually obtained from + ! preprocessing __LINE__,__MYFILE__) + CHARACTER(len=*),OPTIONAL :: filename + + IF (status/=nf90_noerr) THEN + WRITE(*,*)'Netcdf error, code ',status + IF (PRESENT(filename)) THEN + WRITE(*,*)'In file ',filename,' in line ',lineno + ELSE + WRITE(*,*)'In line ',lineno + END IF + WRITE(*,'(2A)')' Error message : ',nf90_strerror(status) + CALL abort + ENDIF + + END SUBROUTINE nchdlerr + +!---------------------------------------------------------------------- +END MODULE nctools + +MODULE scrippar + INTEGER, PARAMETER :: scripdp = SELECTED_REAL_KIND(12,307) + INTEGER, PARAMETER :: scriplen = 80 +END MODULE scrippar + +MODULE scripgrid + + USE nctools + USE scrippar + + IMPLICIT NONE + + TYPE scripgridtype + INTEGER :: grid_size + INTEGER :: grid_corners + INTEGER :: grid_rank + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_dims + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: grid_center_lon + INTEGER, ALLOCATABLE, DIMENSION(:) :: grid_imask + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lat + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: grid_corner_lon + 
CHARACTER(len=scriplen) :: grid_center_lat_units + CHARACTER(len=scriplen) :: grid_center_lon_units + CHARACTER(len=scriplen) :: grid_imask_units + CHARACTER(len=scriplen) :: grid_corner_lat_units + CHARACTER(len=scriplen) :: grid_corner_lon_units + CHARACTER(len=scriplen) :: title + END TYPE scripgridtype + +CONTAINS + + SUBROUTINE scripgrid_read( cdfilename, grid ) + + CHARACTER(len=*) :: cdfilename + TYPE(scripgridtype) :: grid + + INTEGER :: ncid, dimid, varid + + CALL scripgrid_init(grid) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_corners),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=grid%grid_rank),& + & __LINE__,__MYFILE__) + + CALL scripgrid_alloc(grid) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lat',varid), & + & 
__LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',grid%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,grid%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',grid%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_read + + SUBROUTINE scripgrid_write( cdgridfile, grid ) + + CHARACTER(len=*) :: cdgridfile + TYPE(scripgridtype) :: grid + + INTEGER :: ncid + INTEGER :: ioldfill + INTEGER :: idimsize,idimxsize,idimysize,idimcorners,idimrank + INTEGER :: idims1rank(1),idims1size(1),idims2(2) + INTEGER :: iddims,idcentlat,idcentlon,idimask,idcornlat,idcornlon + INTEGER :: igriddims(2) + + ! Setup netcdf file + + CALL nchdlerr(nf90_create(TRIM(cdgridfile),nf90_clobber,ncid),& + & __LINE__,__MYFILE__) + + ! Define dimensions + + CALL nchdlerr(nf90_def_dim(ncid,'grid_size',& + & grid%grid_size,idimsize),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_corners',& + & grid%grid_corners,idimcorners),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_def_dim(ncid,'grid_rank',& + & grid%grid_rank,idimrank),& + & __LINE__,__MYFILE__) + + idims1rank(1) = idimrank + + idims1size(1) = idimsize + + idims2(1) = idimcorners + idims2(2) = idimsize + + ! 
Define variables + + CALL nchdlerr(nf90_def_var(ncid,'grid_dims',& + & nf90_int,idims1rank,iddims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lat',& + & nf90_double,idims1size,idcentlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlat,'units',& + & grid%grid_center_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_center_lon',& + & nf90_double,idims1size,idcentlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcentlon,'units',& + & grid%grid_center_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_imask',& + & nf90_int,idims1size,idimask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idimask,'units',& + & grid%grid_imask_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lat',& + & nf90_double,idims2,idcornlat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlat,'units',& + & grid%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_var(ncid,'grid_corner_lon',& + & nf90_double,idims2,idcornlon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idcornlon,'units',& + & grid%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & TRIM(grid%title)),& + & __LINE__,__MYFILE__) + + ! End of netCDF definition phase + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + ! 
Write variables + + + CALL nchdlerr(nf90_put_var(ncid,iddims,grid%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlat,& + & grid%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcentlon,& + & grid%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idimask,& + & grid%grid_imask), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlat,& + & grid%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idcornlon,& + & grid%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ! Close file + + CALL nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripgrid_write + + SUBROUTINE scripgrid_init( grid ) + + TYPE(scripgridtype) :: grid + + grid%grid_size=0 + grid%grid_corners=0 + grid%grid_rank=0 + grid%grid_center_lat_units='' + grid%grid_center_lon_units='' + grid%grid_imask_units='' + grid%grid_corner_lat_units='' + grid%grid_corner_lon_units='' + grid%title='' + + END SUBROUTINE scripgrid_init + + SUBROUTINE scripgrid_alloc( grid ) + + TYPE(scripgridtype) :: grid + + IF ( (grid%grid_size == 0) .OR. & + & (grid%grid_corners == 0) .OR. 
& + & (grid%grid_rank == 0) ) THEN + WRITE(*,*)'scripgridtype not initialized' + CALL abort + ENDIF + + ALLOCATE( & + & grid%grid_dims(grid%grid_rank), & + & grid%grid_center_lat(grid%grid_size), & + & grid%grid_center_lon(grid%grid_size), & + & grid%grid_corner_lat(grid%grid_corners, grid%grid_size), & + & grid%grid_corner_lon(grid%grid_corners, grid%grid_size), & + & grid%grid_imask(grid%grid_size) & + & ) + + END SUBROUTINE scripgrid_alloc + + SUBROUTINE scripgrid_dealloc( grid ) + + TYPE(scripgridtype) :: grid + + DEALLOCATE( & + & grid%grid_dims, & + & grid%grid_center_lat, & + & grid%grid_center_lon, & + & grid%grid_corner_lat, & + & grid%grid_corner_lon, & + & grid%grid_imask & + & ) + + END SUBROUTINE scripgrid_dealloc + +END MODULE scripgrid + +MODULE scripremap + +#if defined key_mpp_mpi + USE mpi +#endif + USE nctools + USE scrippar + USE scripgrid + + IMPLICIT NONE + + TYPE scripremaptype + INTEGER :: num_links + INTEGER :: num_wgts + TYPE(scripgridtype) :: src + TYPE(scripgridtype) :: dst + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_area + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: src_grid_frac + REAL(scripdp), ALLOCATABLE, DIMENSION(:) :: dst_grid_frac + INTEGER, ALLOCATABLE, DIMENSION(:) :: src_address + INTEGER, ALLOCATABLE, DIMENSION(:) :: dst_address + REAL(scripdp), ALLOCATABLE, DIMENSION(:,:) :: remap_matrix + CHARACTER(len=scriplen) :: src_grid_area_units + CHARACTER(len=scriplen) :: dst_grid_area_units + CHARACTER(len=scriplen) :: src_grid_frac_units + CHARACTER(len=scriplen) :: dst_grid_frac_units + CHARACTER(len=scriplen) :: title + CHARACTER(len=scriplen) :: normalization + CHARACTER(len=scriplen) :: map_method + CHARACTER(len=scriplen) :: history + CHARACTER(len=scriplen) :: conventions + END TYPE scripremaptype + +CONTAINS + + SUBROUTINE scripremap_read_work(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid, 
dimid, varid + LOGICAL :: lcorners + + lcorners=.TRUE. + + CALL scripremap_init(remap) + + CALL nchdlerr(nf90_open(TRIM(cdfilename),nf90_nowrite,ncid),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_size),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_size',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_size),& + & __LINE__,__MYFILE__) + + + IF (nf90_inq_dimid(ncid,'src_grid_corners',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + lcorners=.FALSE. + remap%src%grid_corners=1 + ENDIF + + IF (lcorners) THEN + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_corners',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_corners),& + & __LINE__,__MYFILE__) + ELSE + remap%dst%grid_corners=1 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'src_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%src%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'dst_grid_rank',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%dst%grid_rank),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_links',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_links),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=remap%num_wgts),& + & __LINE__,__MYFILE__) + + CALL scripremap_alloc(remap) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL 
nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_dims',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_center_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + IF (lcorners) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL 
nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lat',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_corner_lon',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + + ELSE + + remap%src%grid_corner_lat(:,:) = 0.0 + remap%src%grid_corner_lon(:,:) = 0.0 + remap%dst%grid_corner_lat(:,:) = 0.0 + remap%dst%grid_corner_lon(:,:) = 0.0 + remap%src%grid_corner_lat_units = '' + remap%src%grid_corner_lon_units = '' + remap%dst%grid_corner_lat_units = '' + remap%dst%grid_corner_lon_units = '' + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_imask',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_area',varid), & + & __LINE__,__MYFILE__) + CALL 
nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_grid_frac',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,varid,'units',remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid), & + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'title',remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'normalization',remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'map_method',remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'history',remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'conventions',remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'dest_grid',remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_att(ncid,nf90_global,'source_grid',remap%src%title),& + & __LINE__,__MYFILE__) + + CALL 
nchdlerr(nf90_close(ncid),__LINE__,__MYFILE__) + + END SUBROUTINE scripremap_read_work + + SUBROUTINE scripremap_read(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + CALL scripremap_read_work(cdfilename,remap) + + END SUBROUTINE scripremap_read + + + SUBROUTINE scripremap_read_sgl(cdfilename,remap,& + & mype,nproc,mycomm,linteronly) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + INTEGER :: mype,nproc,mycomm + LOGICAL :: linteronly + + INTEGER, DIMENSION(8) :: isizes + INTEGER :: ierr, ip + + IF (mype==0) THEN + CALL scripremap_read_work(cdfilename,remap) +#if defined key_mpp_mpi + isizes(1)=remap%src%grid_size + isizes(2)=remap%dst%grid_size + isizes(3)=remap%src%grid_corners + isizes(4)=remap%dst%grid_corners + isizes(5)=remap%src%grid_rank + isizes(6)=remap%dst%grid_rank + isizes(7)=remap%num_links + isizes(8)=remap%num_wgts + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + ELSE + CALL mpi_bcast( isizes, 8, mpi_integer, 0, mycomm, ierr) + CALL scripremap_init(remap) + remap%src%grid_size=isizes(1) + remap%dst%grid_size=isizes(2) + remap%src%grid_corners=isizes(3) + remap%dst%grid_corners=isizes(4) + remap%src%grid_rank=isizes(5) + remap%dst%grid_rank=isizes(6) + remap%num_links=isizes(7) + remap%num_wgts=isizes(8) + CALL scripremap_alloc(remap) +#endif + ENDIF + +#if defined key_mpp_mpi + + IF (.NOT.linteronly) THEN + + CALL mpi_bcast( remap%src%grid_dims, remap%src%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lat, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon, remap%src%grid_corners*remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, 
ierr ) + + CALL mpi_bcast( remap%dst%grid_dims, remap%dst%grid_rank, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon, remap%dst%grid_corners*remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src_grid_area, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac, remap%src%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac, remap%dst%grid_size, & + & mpi_double_precision, 0, mycomm, ierr ) + + CALL mpi_bcast( remap%src%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_center_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lat_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_corner_lon_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask_units, scriplen, & + & mpi_character, 
0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_area_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_grid_frac_units, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%title, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%normalization, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%map_method, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%history, scriplen, & + & mpi_character, 0, mycomm, ierr ) + CALL mpi_bcast( remap%conventions, scriplen, & + & mpi_character, 0, mycomm, ierr ) + ENDIF + + CALL mpi_bcast( remap%src_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst_address, remap%num_links, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%remap_matrix, remap%num_wgts*remap%num_links, & + & mpi_double_precision, 0, mycomm, ierr ) + CALL mpi_bcast( remap%src%grid_imask, remap%src%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + CALL mpi_bcast( remap%dst%grid_imask, remap%dst%grid_size, & + & mpi_integer, 0, mycomm, ierr ) + +#endif + END SUBROUTINE scripremap_read_sgl + + SUBROUTINE scripremap_write(cdfilename,remap) + + CHARACTER(len=*) :: cdfilename + TYPE(scripremaptype) :: remap + + INTEGER :: ncid + INTEGER :: dimsgs,dimdgs,dimsgc,dimdgc,dimsgr,dimdgr,dimnl,dimnw + INTEGER :: dims1(1),dims2(2) + INTEGER :: idsgd,iddgd,idsgea,iddgea,idsgeo,iddgeo + INTEGER :: idsgoa,idsgoo,iddgoa,iddgoo,idsgim,iddgim,idsgar,iddgar + INTEGER :: idsgf,iddgf,idsga,iddga,idsa,idda,idrm + + CALL nchdlerr(nf90_create(TRIM(cdfilename),nf90_clobber,ncid), & + & __LINE__, __MYFILE__ ) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_size',& + & remap%src%grid_size,dimsgs),& + & __LINE__,__MYFILE__) + + CALL 
nchdlerr(nf90_def_dim(ncid,'dst_grid_size',& + & remap%dst%grid_size,dimdgs),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_corners',& + & remap%src%grid_corners,dimsgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_corners',& + & remap%dst%grid_corners,dimdgc),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'src_grid_rank',& + & remap%src%grid_rank,dimsgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'dst_grid_rank',& + & remap%dst%grid_rank,dimdgr),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & remap%num_links,dimnl),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & remap%num_wgts,dimnw),& + & __LINE__,__MYFILE__) + + dims1(1)=dimsgr + CALL nchdlerr(nf90_def_var(ncid,'src_grid_dims',& + & nf90_int,dims1,idsgd),& + & __LINE__,__MYFILE__) + + dims1(1)=dimdgr + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_dims',& + & nf90_int,dims1,iddgd), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lat',& + & nf90_double,dims1,idsgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lat',& + & nf90_double,dims1,iddgea), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_center_lon',& + & nf90_double,dims1,idsgeo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_center_lon',& + & nf90_double,dims1,iddgeo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lat',& + & nf90_double,dims2,idsgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimsgc + dims2(2)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_corner_lon',& + & nf90_double,dims2,idsgoo), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lat',& + & 
nf90_double,dims2,iddgoa), & + & __LINE__,__MYFILE__) + + dims2(1)=dimdgc + dims2(2)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_corner_lon',& + & nf90_double,dims2,iddgoo), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_imask',& + & nf90_int,dims1,idsgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_imask',& + & nf90_int,dims1,iddgim), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_area',& + & nf90_double,dims1,idsga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_area',& + & nf90_double,dims1,iddga), & + & __LINE__,__MYFILE__) + + dims1(1)=dimsgs + CALL nchdlerr(nf90_def_var(ncid,'src_grid_frac',& + & nf90_double,dims1,idsgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimdgs + CALL nchdlerr(nf90_def_var(ncid,'dst_grid_frac',& + & nf90_double,dims1,iddgf), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa), & + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda), & + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm), & + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,idsgea,'units',& + & remap%src%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgea,'units',& + & remap%dst%grid_center_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgeo,'units',& + & remap%src%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgeo,'units',& + & remap%dst%grid_center_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoa,'units',& + & remap%src%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgoo,'units',& + & 
remap%src%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoa,'units',& + & remap%dst%grid_corner_lat_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgoo,'units',& + & remap%dst%grid_corner_lon_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgim,'units',& + & remap%src%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgim,'units',& + & remap%dst%grid_imask_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsga,'units',& + & remap%src_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddga,'units',& + & remap%dst_grid_area_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,idsgf,'units',& + & remap%src_grid_frac_units),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,iddgf,'units',& + & remap%dst_grid_frac_units),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'title',& + & remap%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'normalization',& + & remap%normalization),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'map_method',& + & remap%map_method),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'history',& + & remap%history),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'conventions',& + & remap%conventions),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'dest_grid',& + & remap%dst%title),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_att(ncid,nf90_global,'source_grid',& + & remap%src%title),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgd,remap%src%grid_dims),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgd,remap%dst%grid_dims),& + & __LINE__,__MYFILE__) + + CALL 
nchdlerr(nf90_put_var(ncid,idsgea,remap%src%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgea,remap%dst%grid_center_lat),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgeo,remap%src%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,iddgeo,remap%dst%grid_center_lon),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsgoa,remap%src%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgoo,remap%src%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoa,remap%dst%grid_corner_lat),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgoo,remap%dst%grid_corner_lon),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgim,remap%src%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgim,remap%dst%grid_imask),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsga,remap%src_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddga,remap%dst_grid_area),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsgf,remap%src_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,iddgf,remap%dst_grid_frac),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idsa,remap%src_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idda,remap%dst_address),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_put_var(ncid,idrm,remap%remap_matrix),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE scripremap_write + + SUBROUTINE scripremap_init(remap) + + TYPE(scripremaptype) :: remap + + CALL scripgrid_init(remap%src) + CALL scripgrid_init(remap%dst) + remap%num_links = 0 + remap%num_wgts = 0 + remap%title='' + remap%normalization='' + remap%map_method='' + remap%history='' + remap%conventions='' + remap%src_grid_area_units='' + remap%dst_grid_area_units='' + 
remap%src_grid_frac_units='' + remap%dst_grid_frac_units='' + + END SUBROUTINE scripremap_init + + SUBROUTINE scripremap_alloc(remap) + + TYPE(scripremaptype) :: remap + + IF ( (remap%num_links == 0) .OR. & + & (remap%num_wgts == 0) ) THEN + WRITE(*,*)'scripremaptype not initialized' + CALL abort + ENDIF + + CALL scripgrid_alloc(remap%src) + CALL scripgrid_alloc(remap%dst) + + ALLOCATE( & + & remap%src_grid_area(remap%src%grid_size), & + & remap%dst_grid_area(remap%dst%grid_size), & + & remap%src_grid_frac(remap%src%grid_size), & + & remap%dst_grid_frac(remap%dst%grid_size), & + & remap%src_address(remap%num_links), & + & remap%dst_address(remap%num_links), & + & remap%remap_matrix(remap%num_wgts, remap%num_links) & + & ) + + END SUBROUTINE scripremap_alloc + + SUBROUTINE scripremap_dealloc(remap) + + TYPE(scripremaptype) :: remap + + DEALLOCATE( & + & remap%src_grid_area, & + & remap%dst_grid_area, & + & remap%src_grid_frac, & + & remap%dst_grid_frac, & + & remap%src_address, & + & remap%dst_address, & + & remap%remap_matrix & + & ) + + CALL scripgrid_dealloc(remap%src) + CALL scripgrid_dealloc(remap%dst) + + CALL scripremap_init(remap) + + END SUBROUTINE scripremap_dealloc + +END MODULE scripremap + +MODULE parinter + +#if defined key_mpp_mpi + USE mpi +#endif + USE scripremap + USE scrippar + USE nctools + + IMPLICIT NONE + + ! Type to contains interpolation information + ! (like what is in scripremaptype) and message + ! passing information + + TYPE parinterinfo + ! Number of local links + INTEGER :: num_links + ! Destination side + INTEGER, POINTER, DIMENSION(:) :: dst_address + ! Source addresses and work array + INTEGER, POINTER, DIMENSION(:) :: src_address + ! Local remap matrix + REAL(scripdp), POINTER, DIMENSION(:,:) :: remap_matrix + ! Message passing information + ! Array of local addresses for send buffer + ! packing + INTEGER, POINTER, DIMENSION(:) :: send_address + ! 
Sending bookkeeping + INTEGER :: nsendtot + INTEGER, POINTER, DIMENSION(:) :: nsend,nsdisp + ! Receiving bookkeeping + INTEGER :: nrecvtot + INTEGER, POINTER, DIMENSION(:) :: nrecv,nrdisp + END TYPE parinterinfo + +CONTAINS + + SUBROUTINE parinter_init( mype, nproc, mpi_comm, & + & nsrclocpoints, nsrcglopoints, srcmask, srcgloind, & + & ndstlocpoints, ndstglopoints, dstmask, dstgloind, & + & remap, pinfo, lcommout, commoutprefix, iunit ) + + ! Setup interpolation based on SCRIP format weights in + ! remap and the source/destination grids information. + + ! Procedure: + + ! 1) A global SCRIP remapping file is read on all processors. + ! 2) Find local destination points in the global grid. + ! 3) Find which processor needs source data and setup buffer + ! information for sending data. + ! 4) Construct new src remapping for buffer received + + ! All information is stored in the TYPE(parinterinfo) output + ! data type + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc, mpi_comm + ! Source grid local and global number of grid points + INTEGER, INTENT(IN) :: nsrclocpoints, nsrcglopoints + ! Source integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcmask + ! Source global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(nsrclocpoints) :: srcgloind + ! Destination grid local and global number of grid points + INTEGER, INTENT(IN) :: ndstlocpoints, ndstglopoints + ! Destination integer mask (0/1) for SCRIP compliance + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstmask + ! Destination global addresses of each local grid point + INTEGER, INTENT(IN), DIMENSION(ndstlocpoints) :: dstgloind + ! SCRIP remapping data + TYPE(scripremaptype) :: remap + ! Switch for output communication patterns + LOGICAL :: lcommout + CHARACTER(len=*) :: commoutprefix + ! Unit to use for output + INTEGER :: iunit + + ! Output arguments + + ! 
Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + + ! Local variable + + ! Variable for glocal <-> local address/pe information + INTEGER, DIMENSION(nsrcglopoints) :: ilsrcmppmap, ilsrclocind + INTEGER, DIMENSION(nsrcglopoints) :: igsrcmppmap, igsrclocind + INTEGER, DIMENSION(ndstglopoints) :: ildstmppmap, ildstlocind + INTEGER, DIMENSION(ndstglopoints) :: igdstmppmap, igdstlocind + INTEGER, DIMENSION(nsrcglopoints) :: isrcpe,isrcpetmp + INTEGER, DIMENSION(nsrcglopoints) :: isrcaddtmp + INTEGER, DIMENSION(0:nproc-1) :: isrcoffset + INTEGER, DIMENSION(nproc) :: isrcno, isrcoff, isrccur + INTEGER, DIMENSION(nproc) :: ircvoff, ircvcur + INTEGER, DIMENSION(:), ALLOCATABLE :: isrctot, ircvtot + + ! Misc variable + INTEGER :: i,n,pe + INTEGER :: istatus + CHARACTER(len=256) :: cdfile + + ! Check that masks are consistent. + + ! Remark: More consistency tests between remapping information + ! and input argument could be code, but for now we settle + ! for checking the masks. + + ! Source grid + + DO i=1,nsrclocpoints + IF (srcmask(i)/=remap%src%grid_imask(srcgloind(i))) THEN + WRITE(iunit,*)'Source imask is inconsistent at ' + WRITE(iunit,*)'global index = ',srcgloind(i) + WRITE(iunit,*)'Source mask = ',srcmask(i) + WRITE(iunit,*)'Remap mask = ',remap%src%grid_imask(srcgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%src%grid_center_lat(srcgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%src%grid_center_lon(srcgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! 
Destination grid + + DO i=1,ndstlocpoints + IF (dstmask(i)/=remap%dst%grid_imask(dstgloind(i))) THEN + WRITE(iunit,*)'Destination imask is inconsistent at ' + WRITE(iunit,*)'global index = ',dstgloind(i) + WRITE(iunit,*)'Destin mask = ',dstmask(i) + WRITE(iunit,*)'Remap mask = ',remap%dst%grid_imask(dstgloind(i)) + WRITE(iunit,*)'Latitude = ',remap%dst%grid_center_lat(dstgloind(i)) + WRITE(iunit,*)'Longitude = ',remap%dst%grid_center_lon(dstgloind(i)) + CALL flush(iunit) + CALL abort + ENDIF + ENDDO + + ! Setup global to local and vice versa mappings. + + ilsrcmppmap(:)=-1 + ilsrclocind(:)=0 + ildstmppmap(:)=-1 + ildstlocind(:)=0 + + DO i=1,nsrclocpoints + ilsrcmppmap(srcgloind(i))=mype + ilsrclocind(srcgloind(i))=i + ENDDO + + DO i=1,ndstlocpoints + ildstmppmap(dstgloind(i))=mype + ildstlocind(dstgloind(i))=i + ENDDO + +#if defined key_mpp_mpi + CALL mpi_allreduce(ilsrcmppmap,igsrcmppmap,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ilsrclocind,igsrclocind,nsrcglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstmppmap,igdstmppmap,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) + CALL mpi_allreduce(ildstlocind,igdstlocind,ndstglopoints, & + & mpi_integer,mpi_max,mpi_comm,istatus) +#else + igsrcmppmap(:)=ilsrcmppmap(:) + igsrclocind(:)=ilsrclocind(:) + igdstmppmap(:)=ildstmppmap(:) + igdstlocind(:)=ildstlocind(:) +#endif + + ! Optionally construct an ascii file listing what src and + ! dest points belongs to which task + + ! Since igsrcmppmap and igdstmppmap are global data only do + ! this for mype==0. 
+ + IF (lcommout.AND.(mype==0)) THEN + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_srcmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,nsrcglopoints + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & igsrcmppmap(i)+1,remap%src%grid_imask(i) + ENDDO + CLOSE(9) + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dstmppmap_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO i=1,ndstglopoints + WRITE(9,*)remap%dst%grid_center_lat(i),& + & remap%dst%grid_center_lon(i), & + & igdstmppmap(i)+1,remap%dst%grid_imask(i) + ENDDO + CLOSE(9) + ENDIF + + ! + ! Standard interpolation in serial case is + ! + ! DO n=1,remap%num_links + ! zdst(remap%dst_address(n)) = zdst(remap%dst_address(n)) + & + ! & remap%remap_matrix(1,n)*zsrc(remap%src_address(n)) + ! END DO + ! + + ! In parallel we need to first find local number of links + + pinfo%num_links=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) & + & pinfo%num_links=pinfo%num_links+1 + ENDDO + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(1,pinfo%num_links)) + + ! Get local destination addresses + + n=0 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + n=n+1 + pinfo%dst_address(n)=& + & igdstlocind(remap%dst_address(i)) + pinfo%remap_matrix(:,n)=& + & remap%remap_matrix(:,i) + ENDIF + ENDDO + + ! Get sending processors maps. + + ! The same data point might need to be sent to many processors + ! so first construct a map for processors needing the data + + isrcpe(:)=-1 + DO i=1,remap%num_links + IF (igdstmppmap(remap%dst_address(i))==mype) THEN + isrcpe(remap%src_address(i))=& + & igsrcmppmap(remap%src_address(i)) + ENDIF + ENDDO + + ! Optionally write a set if ascii file listing which tasks + ! mype needs to send to communicate with + + IF (lcommout) THEN + ! 
Destination processors + WRITE(cdfile,'(A,I4.4,A)')commoutprefix//'_dsts_',mype+1,'.dat' + OPEN(9,file=cdfile) + DO pe=0,nproc-1 + IF (pe==mype) THEN + isrcpetmp(:)=isrcpe(:) + ENDIF +#if defined key_mpp_mpi + CALL mpi_bcast(isrcpetmp,nsrcglopoints,mpi_integer,pe,mpi_comm,istatus) +#endif + DO i=1,nsrcglopoints + IF (isrcpetmp(i)==mype) THEN + WRITE(9,*)remap%src%grid_center_lat(i),& + & remap%src%grid_center_lon(i), & + & pe+1,mype+1 + ENDIF + ENDDO + ENDDO + CLOSE(9) + ENDIF + + ! Get number of points to send to each processor + + ALLOCATE(pinfo%nsend(0:nproc-1)) + isrcno(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrcno(isrcpe(i)+1)=isrcno(isrcpe(i)+1)+1 + ENDIF + ENDDO +#if defined key_mpp_mpi + CALL mpi_alltoall(isrcno,1,mpi_integer, & + & pinfo%nsend(0:nproc-1),1,mpi_integer, & + & mpi_comm,istatus) +#else + pinfo%nsend(0:nproc-1) = isrcno(1:nproc) +#endif + pinfo%nsendtot=SUM(pinfo%nsend(0:nproc-1)) + + ! Construct sending buffer mapping. Data is mapping in + ! processor order. + + ALLOCATE(pinfo%send_address(pinfo%nsendtot)) + + ! Temporary arrays for mpi all to all. + + ALLOCATE(isrctot(SUM(isrcno(1:nproc)))) + ALLOCATE(ircvtot(SUM(pinfo%nsend(0:nproc-1)))) + + ! Offset for message parsing + + isrcoff(1)=0 + ircvoff(1)=0 + DO i=1,nproc-1 + isrcoff(i+1) = isrcoff(i) + isrcno(i) + ircvoff(i+1) = pinfo%nsend(i-1) + ircvoff(i) + ENDDO + + ! Pack indices i into a buffer + + isrccur(:)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0) THEN + isrccur(isrcpe(i)+1)=isrccur(isrcpe(i)+1)+1 + isrctot(isrccur(isrcpe(i)+1)+isrcoff(isrcpe(i)+1)) = i + ENDIF + ENDDO + + ! Send the data + +#if defined key_mpp_mpi + CALL mpi_alltoallv(& + & isrctot,isrccur,isrcoff,mpi_integer, & + & ircvtot,pinfo%nsend(0:nproc-1),ircvoff,mpi_integer, & + & mpi_comm,istatus) +#else + ircvtot(:)=isrctot(:) +#endif + + ! Get the send address. ircvtot will at this point contain the + ! 
addresses in the global index needed for message passing + + DO i=1,pinfo%nsendtot + pinfo%send_address(i)=igsrclocind(ircvtot(i)) + ENDDO + + ! Deallocate the mpi all to all arrays + + DEALLOCATE(ircvtot,isrctot) + + ! Get number of points to receive to each processor + + ALLOCATE(pinfo%nrecv(0:nproc-1)) + pinfo%nrecv(0:nproc-1)=0 + DO i=1,nsrcglopoints + IF (isrcpe(i)>=0 .AND. isrcpe(i)=0 .AND. isrcpe(i)0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'num_links',& + & pinfo%num_links,dimnl),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'num_wgts',& + & 1,dimnw),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nsendtot',& + & pinfo%nsendtot,dimnst),& + & __LINE__,__MYFILE__) + ENDIF + + IF (pinfo%nrecvtot>0) THEN + CALL nchdlerr(nf90_def_dim(ncid,'nrecvtot',& + & pinfo%nrecvtot,dimnrt),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_def_dim(ncid,'nproc',& + & nproc,dimnpr),& + & __LINE__,__MYFILE__) + + IF (pinfo%num_links>0) THEN + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'dst_address',& + & nf90_int,dims1,idda),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'src_address',& + & nf90_int,dims1,idsa),& + & __LINE__,__MYFILE__) + + dims2(1)=dimnw + dims2(2)=dimnl + CALL nchdlerr(nf90_def_var(ncid,'remap_matrix',& + & nf90_double,dims2,idrm),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsend',& + & nf90_int,dims1,idns),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + dims1(1)=dimnst + CALL nchdlerr(nf90_def_var(ncid,'send_address',& + & nf90_int,dims1,idsaa),& + & __LINE__,__MYFILE__) + + ENDIF + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrecv',& + & nf90_int,dims1,idnr),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nsdisp',& + & nf90_int,dims1,idnsp),& + & __LINE__,__MYFILE__) + + dims1(1)=dimnpr + CALL nchdlerr(nf90_def_var(ncid,'nrdisp',& + & 
nf90_int,dims1,idnrp),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_enddef(ncid),__LINE__,__MYFILE__) + + + IF (pinfo%num_links>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idda,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idsa,pinfo%src_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idrm,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idns,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_put_var(ncid,idsaa,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_put_var(ncid,idnr,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnsp,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_put_var(ncid,idnrp,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + END SUBROUTINE parinter_write + + SUBROUTINE parinter_read( mype, nproc, & + & nsrcglopoints, ndstglopoints, & + & pinfo, cdpath, cdprefix, lexists ) + + ! Write pinfo information in a netCDF file in order to + ! be able to read it rather than calling parinter_init + + ! Input arguments. + + ! Message passing information + INTEGER, INTENT(IN) :: mype, nproc + ! Source grid local global number of grid points + INTEGER, INTENT(IN) :: nsrcglopoints + ! Destination grid global number of grid points + INTEGER, INTENT(IN) :: ndstglopoints + ! Interpolation and message passing information + TYPE(parinterinfo), INTENT(OUT) :: pinfo + ! Does the information exists + LOGICAL :: lexists + ! Path and file prefix + CHARACTER(len=*) :: cdpath, cdprefix + + ! Local variable + + ! 
Misc variable + CHARACTER(len=1024) :: cdfile + INTEGER :: ncid, dimid, varid, num_wgts + + WRITE(cdfile,'(A,2(I8.8,A),2(I4.4,A),A)') & + & TRIM(cdpath)//'/'//TRIM(cdprefix)//'_', & + & nsrcglopoints,'_',ndstglopoints,'_',mype,'_',nproc,'.nc' + + + lexists=nf90_open(TRIM(cdfile),nf90_nowrite,ncid)==nf90_noerr + + IF (lexists) THEN + + ! If num_links is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'num_links',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%num_links),& + & __LINE__,__MYFILE__) + ELSE + pinfo%num_links=0 + ENDIF + + CALL nchdlerr(nf90_inq_dimid(ncid,'num_wgts',dimid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=num_wgts),& + & __LINE__,__MYFILE__) + IF (num_wgts/=1) THEN + WRITE(0,*)'parinter_read: num_wgts has to be 1 for now' + CALL abort + ENDIF + + ! If nsendtot is not present we assume it to be zero. + + IF (nf90_inq_dimid(ncid,'nsendtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nsendtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nsendtot=0 + ENDIF + + IF(nf90_inq_dimid(ncid,'nrecvtot',dimid)==nf90_noerr) THEN + CALL nchdlerr(nf90_inquire_dimension(ncid,dimid,& + & len=pinfo%nrecvtot),& + & __LINE__,__MYFILE__) + ELSE + pinfo%nrecvtot=0 + ENDIF + + ALLOCATE(pinfo%dst_address(pinfo%num_links),& + & pinfo%src_address(pinfo%num_links),& + & pinfo%remap_matrix(num_wgts,pinfo%num_links),& + & pinfo%nsend(0:nproc-1),& + & pinfo%send_address(pinfo%nsendtot),& + & pinfo%nrecv(0:nproc-1),& + & pinfo%nsdisp(0:nproc-1),& + & pinfo%nrdisp(0:nproc-1)) + + IF (pinfo%num_links>0) THEN + CALL nchdlerr(nf90_inq_varid(ncid,'dst_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%dst_address),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'src_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%src_address),& + & 
__LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'remap_matrix',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%remap_matrix),& + & __LINE__,__MYFILE__) + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nsend',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsend(0:nproc-1)),& + & __LINE__,__MYFILE__) + + IF (pinfo%nsendtot>0) THEN + + CALL nchdlerr(nf90_inq_varid(ncid,'send_address',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%send_address),& + & __LINE__,__MYFILE__) + + ENDIF + + CALL nchdlerr(nf90_inq_varid(ncid,'nrecv',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrecv(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nsdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nsdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_inq_varid(ncid,'nrdisp',varid),& + & __LINE__,__MYFILE__) + CALL nchdlerr(nf90_get_var(ncid,varid,pinfo%nrdisp(0:nproc-1)),& + & __LINE__,__MYFILE__) + + CALL nchdlerr(nf90_close(ncid),__LINE__, __MYFILE__ ) + + ENDIF + + END SUBROUTINE parinter_read + +END MODULE parinter + +MODULE interinfo + + ! Parallel regridding information + + USE parinter + + IMPLICIT NONE + + SAVE + + ! IFS to NEMO + + TYPE(parinterinfo) :: gausstoT,gausstoUV + + ! NEMO to IFS + + TYPE(parinterinfo) :: Ttogauss, UVtogauss + + ! Read parinterinfo on task 0 only and broadcast. + + LOGICAL :: lparbcast = .FALSE. + +END MODULE interinfo diff --git a/src/ifs_interface/ifs_notused.F90 b/src/ifs_interface/ifs_notused.F90 new file mode 100644 index 000000000..7d8603248 --- /dev/null +++ b/src/ifs_interface/ifs_notused.F90 @@ -0,0 +1,374 @@ +! Routines usually provided by the library that are currently +! not implemented for FESOM2. +! +! -Original code by Kristian Mogensen, ECMWF. + +SUBROUTINE nemogcmcoup_init_ioserver( icomm, lnemoioserver ) + + ! 
Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + LOGICAL :: lnemoioserver + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver + + +SUBROUTINE nemogcmcoup_init_ioserver_2( icomm ) + + ! Initialize the NEMO mppio server + + IMPLICIT NONE + INTEGER :: icomm + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_init_ioserver_2 + + +SUBROUTINE nemogcmcoup_mlflds_get( mype, npes, icomm, & + & nlev, nopoints, pgt3d, pgs3d, pgu3d, pgv3d ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wpIFS), DIMENSION(nopoints,nlev) :: pgt3d, pgs3d, pgu3d, pgv3d + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints,nlev + + ! Local variables + + WRITE(0,*)'nemogcmcoup_mlflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlflds_get + + +SUBROUTINE nemogcmcoup_get( mype, npes, icomm, & + & nopoints, pgsst, pgice, pgucur, pgvcur ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice and currents + REAL(wpIFS), DIMENSION(nopoints) :: pgsst, pgice, pgucur, pgvcur + + ! 
Local variables + + WRITE(0,*)'nemogcmcoup_get should not be called with FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_get + + +SUBROUTINE nemogcmcoup_exflds_get( mype, npes, icomm, & + & nopoints, pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 ) + + ! Interpolate sst, ice: surf T; albedo; concentration; thickness, + ! snow thickness and currents from the ORCA grid to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + REAL(wpIFS), DIMENSION(nopoints) :: pgssh, pgmld, pg20d, pgsss, & + & pgtem300, pgsal300 + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number Gaussian grid points + INTEGER, INTENT(IN) :: nopoints + + ! Local variables + + WRITE(0,*)'nemogcmcoup_exflds_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_exflds_get + + +SUBROUTINE nemogcmcoup_get_1way( mype, npes, icomm ) + + ! Interpolate sst, ice and currents from the ORCA grid + ! to the Gaussian grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + IMPLICIT NONE + + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + + ! Local variables + + WRITE(0,*)'nemogcmcoup_get_1way should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_get_1way + + +SUBROUTINE nemogcmcoup_mlinit( mype, npes, icomm, & + & nlev, nopoints, pdep, pmask ) + + ! Get information about the vertical discretization of the ocean model + + ! nlevs are maximum levels on input and actual number levels on output + + USE par_kind + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! 
Grid information + INTEGER, INTENT(INOUT) :: nlev, nopoints + REAL(wpIFS), INTENT(OUT), DIMENSION(nlev) :: pdep + REAL(wpIFS), INTENT(OUT), DIMENSION(nopoints,nlev) :: pmask + + ! Local variables + + ! dummy argument with explicit INTENT(OUT) declaration needs an explicit value + pdep=0. + pmask=0. + + WRITE(0,*)'nemogcmcoup_mlinit should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_mlinit + + +SUBROUTINE nemogcmcoup_update( mype, npes, icomm, & + & npoints, pgutau, pgvtau, & + & pgqsr, pgqns, pgemp, kt, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Fluxes on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & + & pgutau, pgvtau, pgqsr, pgqns, pgemp + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_update + +SUBROUTINE nemogcmcoup_update_add( mype, npes, icomm, & + & npoints, pgsst, pgtsk, kt, ldebug ) + + ! Update additional fields in nemogcmcoup_data by parallel + ! interpolation of the input gaussian grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Input on the Gaussian grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), intent(IN) :: & + & pgsst, pgtsk + ! Current time step + INTEGER, INTENT(in) :: kt + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_update_add should not be called when coupling to fesom. Commented ABORT. Proceeding...'
+ !CALL abort + + +END SUBROUTINE nemogcmcoup_update_add + + +SUBROUTINE nemogcmcoup_wam_coupinit( mype, npes, icomm, & + & nlocpoints, nglopoints, & + & nlocmsk, ngloind, iunit ) + + ! Initialize single executable coupling between WAM and NEMO + ! This is called from WAM. + + IMPLICIT NONE + + ! Input arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype,npes,icomm + ! WAM grid information + ! Number of local and global points + INTEGER, INTENT(IN) :: nlocpoints, nglopoints + ! Integer mask and global indices + INTEGER, DIMENSION(nlocpoints), INTENT(IN) :: nlocmsk, ngloind + ! Unit for output in parinter_init + INTEGER :: iunit + + WRITE(0,*)'Wam coupling not implemented for FESOM' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_coupinit + + +SUBROUTINE nemogcmcoup_wam_get( mype, npes, icomm, & + & nopoints, pwsst, pwicecov, pwicethk, & + & pwucur, pwvcur, licethk ) + + ! Interpolate from the ORCA grid + ! to the WAM grid. + + ! This routine can be called at any point in time since it does + ! the necessary message passing in parinter_fld. + + USE par_kind + IMPLICIT NONE + + ! Arguments + + ! Message passing information + INTEGER, INTENT(IN) :: mype, npes, icomm + ! Number WAM grid points + INTEGER, INTENT(IN) :: nopoints + ! Local arrays of sst, ice cover, ice thickness and currents + REAL(wpIFS), DIMENSION(nopoints) :: pwsst, pwicecov, pwicethk, pwucur, pwvcur + LOGICAL :: licethk + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_get should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_get + + +SUBROUTINE nemogcmcoup_wam_update( mype, npes, icomm, & + & npoints, pwswh, pwmwp, & + & pwphioc, pwtauoc, pwstrn, & + & pwustokes, pwvstokes, & + & cdtpro, ldebug ) + + ! Update fluxes in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. 
+ INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & + & pwswh, pwmwp, pwphioc, pwtauoc, pwstrn, pwustokes, pwvstokes + ! Current time + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update + + +SUBROUTINE nemogcmcoup_wam_update_stress( mype, npes, icomm, npoints, & + & pwutau, pwvtau, pwuv10n, pwphif,& + & cdtpro, ldebug ) + + ! Update stresses in nemogcmcoup_data by parallel + ! interpolation of the input WAM grid data + + USE par_kind + + IMPLICIT NONE + + ! Arguments + + ! MPI communications + INTEGER, INTENT(IN) :: mype,npes,icomm + ! Data on the WAM grid. + INTEGER, INTENT(IN) :: npoints + REAL(wpIFS), DIMENSION(npoints), INTENT(IN) :: & + & pwutau, pwvtau, pwuv10n, pwphif + ! Current time step + CHARACTER(len=14), INTENT(IN) :: cdtpro + ! Write debugging fields in netCDF + LOGICAL, INTENT(IN) :: ldebug + + ! Local variables + + WRITE(0,*)'nemogcmcoup_wam_update_stress should not be called when coupling to fesom.' + CALL abort + +END SUBROUTINE nemogcmcoup_wam_update_stress + +SUBROUTINE nemogcmcoup_end_ioserver + + ! Close io servers + + IMPLICIT NONE + INTEGER :: icomm + LOGICAL :: lnemoioserver + + WRITE(*,*)'No mpp_ioserver' + CALL abort + +END SUBROUTINE nemogcmcoup_end_ioserver + diff --git a/src/info_module.F90 b/src/info_module.F90 new file mode 100644 index 000000000..e3c96a5cb --- /dev/null +++ b/src/info_module.F90 @@ -0,0 +1,102 @@ +module info_module +! synopsis: query information from FESOM + + implicit none + public info + private + + type :: info_type + contains + procedure, nopass :: print_definitions + end type + type(info_type) info + +contains + + ! this is a list of preprocessor definitions from the FESOM Fortran source files + ! 
it will probably become outdated at some point and should be reviewed + ! the result will reflect the status of definitions as they are set when *this file* had been compiled + subroutine print_definitions() +#ifdef __icepack + print '(g0)', '__icepack is ON' +#else + print '(g0)', '__icepack is OFF' +#endif +#ifdef __oasis + print '(g0)', '__oasis is ON' +#else + print '(g0)', '__oasis is OFF' +#endif +#ifdef __oifs + print '(g0)', '__oifs is ON' +#else + print '(g0)', '__oifs is OFF' +#endif +#ifdef DEBUG + print '(g0)', 'DEBUG is ON' +#else + print '(g0)', 'DEBUG is OFF' +#endif +#ifdef DISABLE_MULTITHREADING + print '(g0)', 'DISABLE_MULTITHREADING is ON' +#else + print '(g0)', 'DISABLE_MULTITHREADING is OFF' +#endif +#ifdef false + print '(g0)', 'false is ON' +#else + print '(g0)', 'false is OFF' +#endif +#ifdef FVOM_INIT + print '(g0)', 'FVOM_INIT is ON' +#else + print '(g0)', 'FVOM_INIT is OFF' +#endif +#ifdef oifs + print '(g0)', 'oifs is ON' +#else + print '(g0)', 'oifs is OFF' +#endif +#ifdef OMP_MAX_THREADS + print '(g0)', 'OMP_MAX_THREADS is ON' +#else + print '(g0)', 'OMP_MAX_THREADS is OFF' +#endif +#ifdef PARMS + print '(g0)', 'PARMS is ON' +#else + print '(g0)', 'PARMS is OFF' +#endif +#ifdef PETSC + print '(g0)', 'PETSC is ON' +#else + print '(g0)', 'PETSC is OFF' +#endif +#ifdef use_cavity + print '(g0)', 'use_cavity is ON' +#else + print '(g0)', 'use_cavity is OFF' +#endif +#ifdef use_fullfreesurf + print '(g0)', 'use_fullfreesurf is ON' +#else + print '(g0)', 'use_fullfreesurf is OFF' +#endif +#ifdef VERBOSE + print '(g0)', 'VERBOSE is ON' +#else + print '(g0)', 'VERBOSE is OFF' +#endif +#ifdef DISABLE_PARALLEL_RESTART_READ + print '(g0)', 'DISABLE_PARALLEL_RESTART_READ is ON' +#else + print '(g0)', 'DISABLE_PARALLEL_RESTART_READ is OFF' +#endif +#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS + print '(g0)', 'ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS is ON' +#else + print '(g0)', 'ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS is OFF' +#endif + end subroutine + +end 
module diff --git a/src/io_blowup.F90 b/src/io_blowup.F90 index 27dcf686c..60832dd37 100644 --- a/src/io_blowup.F90 +++ b/src/io_blowup.F90 @@ -1,12 +1,15 @@ MODULE io_BLOWUP use g_config use g_clock - use g_parsup use g_comm_auto - USE MOD_MESH - use o_arrays - use i_arrays - implicit none + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE MOD_DYN + USE MOD_ICE + use o_arrays + implicit none #include "netcdf.inc" !___________________________________________________________________________ type nc_dims @@ -62,10 +65,14 @@ MODULE io_BLOWUP !_______________________________________________________________________________ ! ini_ocean_io initializes bid datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! - subroutine ini_blowup_io(year, mesh) + subroutine ini_blowup_io(year, ice, dynamics, tracers, partit, mesh) implicit none - type(t_mesh), intent(in) , target :: mesh integer, intent(in) :: year + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice integer :: ncid, j integer :: varid character(500) :: longname @@ -73,7 +80,10 @@ subroutine ini_blowup_io(year, mesh) character(500) :: trname, units character(4) :: cyear -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if(mype==0) write(*,*)' --> Init. 
blowpup file ' write(cyear,'(i4)') year @@ -91,22 +101,22 @@ subroutine ini_blowup_io(year, mesh) !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(bid, 'eta_n' , (/nod2D/) , 'sea surface elevation', 'm', eta_n); - call def_variable(bid, 'd_eta' , (/nod2D/) , 'change in ssh from solver', 'm', d_eta); + call def_variable(bid, 'eta_n' , (/nod2D/) , 'sea surface elevation', 'm', dynamics%eta_n); + call def_variable(bid, 'd_eta' , (/nod2D/) , 'change in ssh from solver', 'm', dynamics%d_eta); !___ALE related fields______________________________________________________ call def_variable(bid, 'hbar' , (/nod2D/) , 'ALE surface elevation hbar_n+0.5', 'm', hbar); !!PS call def_variable(bid, 'hbar_old' , (/nod2D/) , 'ALE surface elevation hbar_n-0.5', 'm', hbar_old); - call def_variable(bid, 'ssh_rhs' , (/nod2D/) , 'RHS for the elevation', '?', ssh_rhs); - call def_variable(bid, 'ssh_rhs_old', (/nod2D/) , 'RHS for the elevation', '?', ssh_rhs_old); + call def_variable(bid, 'ssh_rhs' , (/nod2D/) , 'RHS for the elevation', '?', dynamics%ssh_rhs); + call def_variable(bid, 'ssh_rhs_old', (/nod2D/) , 'RHS for the elevation', '?', dynamics%ssh_rhs_old); !___Define the netCDF variables for 3D fields_______________________________ call def_variable(bid, 'hnode' , (/nl-1, nod2D/) , 'ALE stuff', '?', hnode); call def_variable(bid, 'helem' , (/nl-1, elem2D/) , 'Element layer thickness', 'm/s', helem(:,:)); - call def_variable(bid, 'u' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV(1,:,:)); - call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV(2,:,:)); - call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', UV_rhs(1,:,:)); - call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', UV_rhs(2,:,:)); - call 
def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', UV_rhsAB(1,:,:)); - call def_variable(bid, 'vrhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); + call def_variable(bid, 'u' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', dynamics%uv(1,:,:)); + call def_variable(bid, 'v' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv(2,:,:)); + call def_variable(bid, 'u_rhs' , (/nl-1, elem2D/) , 'zonal velocity', 'm/s', dynamics%uv_rhs(1,:,:)); + call def_variable(bid, 'v_rhs' , (/nl-1, elem2D/) , 'meridional velocity', 'm/s', dynamics%uv_rhs(2,:,:)); + call def_variable(bid, 'urhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:)); + call def_variable(bid, 'vrhs_AB' , (/nl-1, elem2D/) , 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:)); call def_variable(bid, 'zbar_n_bot' , (/nod2D/) , 'node bottom depth', 'm', zbar_n_bot); call def_variable(bid, 'zbar_e_bot' , (/elem2d/) , 'elem bottom depth', 'm', zbar_e_bot); call def_variable(bid, 'bottom_node_thickness' , (/nod2D/) , 'node bottom thickness', 'm', bottom_node_thickness); @@ -115,7 +125,7 @@ subroutine ini_blowup_io(year, mesh) !!PS call def_variable(bid, 'pgf_y' , (/nl-1, elem2D/) , 'meridional pressure gradient force', '???', pgf_y(:,:)); !!PS call def_variable(bid, 'density_m_rho0' , (/nl-1, nod2D/) , 'density minus rho0', '???', density_m_rho0(:,:)); - do j=1,num_tracers + do j=1, tracers%num_tracers SELECT CASE (j) CASE(1) trname='temp' @@ -130,22 +140,22 @@ subroutine ini_blowup_io(year, mesh) write(longname,'(A15,i1)') 'passive tracer ', j units='none' END SELECT - call def_variable(bid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tr_arr(:,:,j)); + call def_variable(bid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%values(:,:)); !!PS longname=trim(longname)//', Adams–Bashforth' -!!PS call def_variable(bid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), 
trim(units), tr_arr_old(:,:,j)); +!!PS call def_variable(bid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tracers%data(j)%valuesAB(:,:)(:,:)); end do - call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel); - call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel_e); - call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', Wvel_i); - call def_variable(bid, 'cfl_z' , (/nl-1, nod2D/) , 'vertical CFL criteria', '', CFL_z); + call def_variable(bid, 'w' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w); + call def_variable(bid, 'w_expl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_e); + call def_variable(bid, 'w_impl' , (/nl, nod2D/) , 'vertical velocity', 'm/s', dynamics%w_i); + call def_variable(bid, 'cfl_z' , (/nl, nod2D/) , 'vertical CFL criteria', '', dynamics%cfl_z); !_____________________________________________________________________________ ! write snapshot ice variables to blowup file - call def_variable(bid, 'a_ice' , (/nod2D/) , 'ice concentration [0 to 1]', '%', a_ice); - call def_variable(bid, 'm_ice' , (/nod2D/) , 'effective ice thickness', 'm', m_ice); - call def_variable(bid, 'm_snow' , (/nod2D/) , 'effective snow thickness', 'm', m_snow); - call def_variable(bid, 'u_ice' , (/nod2D/) , 'zonal velocity', 'm/s', u_ice); - call def_variable(bid, 'v_ice' , (/nod2D/) , 'meridional velocity', 'm', v_ice); + call def_variable(bid, 'a_ice' , (/nod2D/) , 'ice concentration [0 to 1]', '%', ice%data(1)%values); + call def_variable(bid, 'm_ice' , (/nod2D/) , 'effective ice thickness', 'm', ice%data(2)%values); + call def_variable(bid, 'm_snow' , (/nod2D/) , 'effective snow thickness', 'm', ice%data(3)%values); + call def_variable(bid, 'u_ice' , (/nod2D/) , 'zonal velocity', 'm/s', ice%uice); + call def_variable(bid, 'v_ice' , (/nod2D/) , 'meridional velocity', 'm', ice%vice); !!PS call def_variable(bid, 'a_ice_old' , (/nod2D/) , 'ice concentration 
[0 to 1]', '%', a_ice_old); !PS !!PS call def_variable(bid, 'm_ice_old' , (/nod2D/) , 'effective ice thickness', 'm', m_ice_old); !PS !!PS call def_variable(bid, 'm_snow_old' , (/nod2D/) , 'effective snow thickness', 'm', m_snow_old); !PS @@ -166,36 +176,40 @@ end subroutine ini_blowup_io ! ! !_______________________________________________________________________________ - subroutine blowup(istep, mesh) + subroutine blowup(istep, ice, dynamics, tracers, partit, mesh) implicit none - type(t_mesh), intent(in) , target :: mesh - integer :: istep + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice + integer :: istep ctime=timeold+(dayold-1.)*86400 - call ini_blowup_io(yearnew, mesh) - if(mype==0) write(*,*)'Do output (netCDF, blowup) ...' - if(mype==0) write(*,*)' --> call assoc_ids(bid)' - call assoc_ids(bid) ; call was_error(bid) - if(mype==0) write(*,*)' --> call write_blowup(bid, istep)' - call write_blowup(bid, istep, mesh) ; call was_error(bid) + call ini_blowup_io(yearnew, ice, dynamics, tracers, partit, mesh) + if(partit%mype==0) write(*,*)'Do output (netCDF, blowup) ...' + if(partit%mype==0) write(*,*)' --> call assoc_ids(bid)' + call assoc_ids(bid, partit) ; call was_error(bid, partit) + if(partit%mype==0) write(*,*)' --> call write_blowup(bid, istep)' + call write_blowup(bid, istep, partit, mesh) ; call was_error(bid, partit) end subroutine blowup ! ! !_______________________________________________________________________________ - subroutine create_new_file(id) + subroutine create_new_file(id, partit) implicit none - + type(t_partit), intent(inout), target :: partit type(nc_file), intent(inout) :: id integer :: c, j integer :: n, k, l, kdim, dimid(4) character(2000) :: att_text ! 
Serial output implemented so far - if (mype/=0) return + if (partit%mype/=0) return c=1 id%error_status=0 ! create an ocean output file - if(mype==0) write(*,*) 'initializing blowup file ', trim(id%filename) + if(partit%mype==0) write(*,*) 'initializing blowup file ', trim(id%filename) id%error_status(c) = nf_create(id%filename, IOR(NF_NOCLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), id%ncid); c=c+1 do j=1, id%ndim @@ -276,7 +290,7 @@ subroutine def_variable_1d(id, name, dims, longname, units, data) character(len=*), intent(in) :: name integer, intent(in) :: dims(1) character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: data(:) + real(kind=WP),target, intent(in) :: data(:) integer :: c type(nc_vars), allocatable, dimension(:) :: temp @@ -312,7 +326,7 @@ subroutine def_variable_2d(id, name, dims, longname, units, data) character(len=*), intent(in) :: name integer, intent(in) :: dims(2) character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: data(:,:) + real(kind=WP),target, intent(in) :: data(:,:) integer :: c type(nc_vars), allocatable, dimension(:) :: temp @@ -342,16 +356,20 @@ end subroutine def_variable_2d ! ! !_______________________________________________________________________________ - subroutine write_blowup(id, istep, mesh) + subroutine write_blowup(id, istep, partit, mesh) implicit none type(nc_file), intent(inout) :: id integer, intent(in) :: istep real(kind=WP), allocatable :: aux1(:), aux2(:,:) integer :: i, size1, size2, shape integer :: c - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! 
Serial output implemented so far if (mype==0) then @@ -369,8 +387,8 @@ subroutine write_blowup(id, istep, mesh) if (shape==1) then size1=id%var(i)%dims(1) if (mype==0) allocate(aux1(size1)) - if (size1==nod2D) call gather_nod (id%var(i)%pt1, aux1) - if (size1==elem2D) call gather_elem(id%var(i)%pt1, aux1) + if (size1==nod2D) call gather_nod (id%var(i)%pt1, aux1, partit) + if (size1==elem2D) call gather_elem(id%var(i)%pt1, aux1, partit) if (mype==0) then id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/1, id%rec_count/), (/size1, 1/), aux1, 1); c=c+1 end if @@ -380,37 +398,37 @@ subroutine write_blowup(id, istep, mesh) size1=id%var(i)%dims(1) size2=id%var(i)%dims(2) if (mype==0) allocate(aux2(size1, size2)) - if (size1==nod2D .or. size2==nod2D) call gather_nod (id%var(i)%pt2, aux2) - if (size1==elem2D .or. size2==elem2D) call gather_elem(id%var(i)%pt2, aux2) + if (size1==nod2D .or. size2==nod2D) call gather_nod (id%var(i)%pt2, aux2, partit) + if (size1==elem2D .or. size2==elem2D) call gather_elem(id%var(i)%pt2, aux2, partit) if (mype==0) then id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/1, 1, id%rec_count/), (/size1, size2, 1/), aux2, 2); c=c+1 end if if (mype==0) deallocate(aux2) else if (mype==0) write(*,*) 'not supported shape of array in restart file' - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if end do if (mype==0) id%error_count=c-1 - call was_error(id) + call was_error(id, partit) if (mype==0) id%error_status(1)=nf_close(id%ncid); id%error_count=1 - call was_error(id) + call was_error(id, partit) end subroutine write_blowup ! ! !_______________________________________________________________________________ - subroutine assoc_ids(id) + subroutine assoc_ids(id, partit) implicit none - + type(t_partit), intent(inout) :: partit type(nc_file), intent(inout) :: id character(500) :: longname integer :: c, j, k real(kind=WP) :: rtime !timestamp of the record ! 
Serial output implemented so far - if (mype/=0) return + if (partit%mype/=0) return c=1 id%error_status=0 ! open existing netcdf file @@ -419,7 +437,7 @@ subroutine assoc_ids(id) id%error_status(c) = nf_open(id%filename, nf_nowrite, id%ncid) !if the file does not exist it will be created! if (id%error_status(c) .ne. nf_noerr) then - call create_new_file(id) ! error status counter will be reset + call create_new_file(id, partit) ! error status counter will be reset c=id%error_count+1 id%error_status(c) = nf_open(id%filename, nf_nowrite, id%ncid); c=c+1 end if @@ -446,9 +464,9 @@ subroutine assoc_ids(id) exit ! a proper rec_count detected, ready for reading restart, exit the loop end if if (k==1) then - if (mype==0) write(*,*) 'WARNING: all dates in restart file are after the current date' - if (mype==0) write(*,*) 'reading restart will not be possible !' - if (mype==0) write(*,*) 'the model attempted to start with the time stamp = ', int(ctime) + if (partit%mype==0) write(*,*) 'WARNING: all dates in restart file are after the current date' + if (partit%mype==0) write(*,*) 'reading restart will not be possible !' + if (partit%mype==0) write(*,*) 'the model attempted to start with the time stamp = ', int(ctime) id%error_status(c)=-310; end if end do @@ -465,20 +483,21 @@ end subroutine assoc_ids ! ! 
!_______________________________________________________________________________ - subroutine was_error(id) + subroutine was_error(id, partit) implicit none - type(nc_file), intent(inout) :: id - integer :: k, status, ierror - - call MPI_BCast(id%error_count, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(id%error_status(1), id%error_count, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + type(nc_file), intent(inout) :: id + type(t_partit), intent(inout) :: partit + integer :: k, status, ierror + + call MPI_BCast(id%error_count, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + call MPI_BCast(id%error_status(1), id%error_count, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) do k=1, id%error_count status=id%error_status(k) if (status .ne. nf_noerr) then - if (mype==0) write(*,*) 'error counter=', k - if (mype==0) call handle_err(status) - call par_ex + if (partit%mype==0) write(*,*) 'error counter=', k + if (partit%mype==0) call handle_err(status, partit) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop end if end do diff --git a/src/io_fesom_file.F90 b/src/io_fesom_file.F90 new file mode 100644 index 000000000..21b599ee1 --- /dev/null +++ b/src/io_fesom_file.F90 @@ -0,0 +1,525 @@ + ! synopsis: generic implementation to asynchronously read/write FESOM mesh variable(s) with distributed cell or element data in 2D or 3D to/from a NetCDF file +module io_fesom_file_module + use io_netcdf_file_module + use async_threads_module + use MOD_PARTIT + implicit none + public fesom_file_type + private + + + type var_info + integer var_index + real(kind=8), pointer :: external_local_data_ptr(:,:) => null() + real(kind=8), allocatable, dimension(:,:) :: local_data_copy + real(kind=8), allocatable :: global_level_data(:) + integer :: global_level_data_size = 0 + logical is_elem_based + character(:), allocatable :: varname ! todo: maybe use a getter in netcdf_file_type to get the name + end type + + + type dim_info + integer idx + integer len ! 
better query the len from the netcdf_file_type ? + end type + + + type, extends(netcdf_file_type) :: fesom_file_type ! todo maybe: do not inherit but use composition to have different implementations for the iorank and non-io ranks + private + integer time_dimidx + integer time_varidx + type(var_info) var_infos(20); integer :: nvar_infos = 0 ! todo: allow dynamically allocated size without messing with shallow copied pointers + type(dim_info), allocatable :: used_mesh_dims(:) ! the dims we add for our variables, we need to identify them when adding our mesh related variables + integer :: rec_cnt = -1 + integer :: iorank = 0 + integer :: fesom_file_index + type(thread_type) thread + logical :: thread_running = .false. + integer :: comm + type(t_partit), pointer :: partit + logical gather_and_write + contains + procedure, public :: async_read_and_scatter_variables, async_gather_and_write_variables, join, init, is_iorank, rec_count, time_varindex, time_dimindex + procedure, public :: read_variables_raw, write_variables_raw + procedure, public :: close_file ! inherited procedures we overwrite + generic, public :: specify_node_var => specify_node_var_2d, specify_node_var_3d + generic, public :: specify_elem_var => specify_elem_var_2d, specify_elem_var_3d + procedure, private :: specify_node_var_2d, specify_node_var_3d + procedure, private :: specify_elem_var_2d, specify_elem_var_3d + procedure, private :: read_and_scatter_variables, gather_and_write_variables + end type + + + integer, save :: m_nod2d + integer, save :: m_elem2d + integer, save :: m_nl + + + type fesom_file_type_ptr + class(fesom_file_type), pointer :: ptr + end type + type(fesom_file_type_ptr), allocatable, save :: all_fesom_files(:) + + +contains + + + function is_iorank(this) result(x) + class(fesom_file_type), intent(in) :: this + logical x + x = (this%partit%mype == this%iorank) + end function + + + ! 
return the number of timesteps of the file if a file is attached or return the default value of -1 + function rec_count(this) result(x) + class(fesom_file_type), intent(inout) :: this + integer x + ! EO parameters + integer, allocatable :: time_shape(:) + + if(this%rec_cnt == -1 .and. this%is_attached()) then + ! update from file if rec_cnt has never been used before + call this%read_var_shape(this%time_varidx, time_shape) + this%rec_cnt = time_shape(1) + end if + + x = this%rec_cnt + end function + + + function time_varindex(this) result(x) + class(fesom_file_type), intent(in) :: this + integer x + x = this%time_varidx + end function + + + function time_dimindex(this) result(x) + class(fesom_file_type), intent(in) :: this + integer x + x = this%time_dimidx + end function + + + subroutine init(this, mesh_nod2d, mesh_elem2d, mesh_nl, partit) ! todo: would like to call it initialize but Fortran is rather cluncky with overwriting base type procedures + use io_netcdf_workaround_module + use io_gather_module + use MOD_PARTIT + class(fesom_file_type), target, intent(inout) :: this + integer mesh_nod2d + integer mesh_elem2d + integer mesh_nl + type(t_partit), target :: partit + ! EO parameters + type(fesom_file_type_ptr), allocatable :: tmparr(:) + logical async_netcdf_allowed + integer err + integer provided_mpi_thread_support_level + + call init_io_gather(partit) + + ! get hold of our mesh data for later use (assume the mesh instance will not change) + m_nod2d = mesh_nod2d + m_elem2d = mesh_elem2d + m_nl = mesh_nl + call this%netcdf_file_type%initialize() + + allocate(this%used_mesh_dims(0)) + + this%time_dimidx = this%add_dim_unlimited('time') + + this%time_varidx = this%add_var_double('time', [this%time_dimidx]) + + ! add this instance to global array + ! the array is being used to identify the instance in an async call + if( .not. 
allocated(all_fesom_files)) then + allocate(all_fesom_files(1)) + else + allocate( tmparr(size(all_fesom_files)+1) ) + tmparr(1:size(all_fesom_files)) = all_fesom_files + deallocate(all_fesom_files) + call move_alloc(tmparr, all_fesom_files) + end if + all_fesom_files(size(all_fesom_files))%ptr => this + this%fesom_file_index = size(all_fesom_files) + + this%partit => partit + ! set up async output + + this%iorank = next_io_rank(partit%MPI_COMM_FESOM, async_netcdf_allowed, partit) + + call MPI_Comm_dup(partit%MPI_COMM_FESOM, this%comm, err) + + call this%thread%initialize(async_worker, this%fesom_file_index) + if(.not. async_netcdf_allowed) call this%thread%disable_async() + + ! check if we have multi thread support available in the MPI library + ! tough MPI_THREAD_FUNNELED should be enough here, at least on cray-mpich 7.5.3 async mpi calls fail if we do not have support level 'MPI_THREAD_MULTIPLE' + ! on cray-mpich we only get level 'MPI_THREAD_MULTIPLE' if 'MPICH_MAX_THREAD_SAFETY=multiple' is set in the environment + call MPI_Query_thread(provided_mpi_thread_support_level, err) + if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call this%thread%disable_async() + end subroutine + + + subroutine read_and_scatter_variables(this) + use io_scatter_module + class(fesom_file_type), target :: this + ! EO parameters + integer i,lvl, nlvl + logical is_2d + integer last_rec_idx + type(var_info), pointer :: var + real(kind=8), allocatable :: laux(:) + integer mpierr + + last_rec_idx = this%rec_count() + + do i=1, this%nvar_infos + var => this%var_infos(i) + + nlvl = size(var%external_local_data_ptr,dim=1) + is_2d = (nlvl == 1) + allocate(laux( size(var%external_local_data_ptr,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D + + if(this%is_iorank()) then + ! todo: choose how many levels we read at once + if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( var%global_level_data_size )) + else + if(.not. 
allocated(var%global_level_data)) allocate(var%global_level_data( 0 )) + end if + + do lvl=1, nlvl +#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS + ! aleph cray-mpich workaround + call MPI_Barrier(this%comm, mpierr) +#endif + if(this%is_iorank()) then + if(is_2d) then + call this%read_var(var%var_index, [1,last_rec_idx], [size(var%global_level_data),1], var%global_level_data) + else + call this%read_var(var%var_index, [1,lvl,last_rec_idx], [size(var%global_level_data),1,1], var%global_level_data) + end if + end if + + if(var%is_elem_based) then + call scatter_elem2D(var%global_level_data, laux, this%iorank, this%comm, this%partit) + else + call scatter_nod2D(var%global_level_data, laux, this%iorank, this%comm, this%partit) + end if + ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI + var%external_local_data_ptr(lvl,:) = laux ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) + end do + deallocate(laux) + end do + end subroutine + + + subroutine gather_and_write_variables(this) + use io_gather_module + class(fesom_file_type), target :: this + ! EO parameters + integer i,lvl, nlvl + logical is_2d + real(kind=8), allocatable :: laux(:) + type(var_info), pointer :: var + integer mpierr + + if(this%is_iorank()) this%rec_cnt = this%rec_count()+1 + + do i=1, this%nvar_infos + var => this%var_infos(i) + + nlvl = size(var%local_data_copy,dim=1) + is_2d = (nlvl == 1) + allocate(laux( size(var%local_data_copy,dim=2) )) ! i.e. myDim_elem2D+eDim_elem2D or myDim_nod2D+eDim_nod2D + + if(this%is_iorank()) then + ! todo: choose how many levels we write at once + if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( var%global_level_data_size )) + else + if(.not. allocated(var%global_level_data)) allocate(var%global_level_data( 0 )) + end if + + do lvl=1, nlvl +#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS + ! 
aleph cray-mpich workaround + call MPI_Barrier(this%comm, mpierr) +#endif + ! the data from our pointer is not contiguous (if it is 3D data), so we can not pass the pointer directly to MPI + laux = var%local_data_copy(lvl,:) ! todo: remove this buffer and pass the data directly to MPI (change order of data layout to be levelwise or do not gather levelwise but by columns) + + if(var%is_elem_based) then + call gather_elem2D(laux, var%global_level_data, this%iorank, 42, this%comm, this%partit) + else + call gather_nod2D (laux, var%global_level_data, this%iorank, 42, this%comm, this%partit) + end if + + if(this%is_iorank()) then + if(is_2d) then + call this%write_var(var%var_index, [1,this%rec_cnt], [size(var%global_level_data),1], var%global_level_data) + else + call this%write_var(var%var_index, [1,lvl,this%rec_cnt], [size(var%global_level_data),1,1], var%global_level_data) + end if + end if + end do + deallocate(laux) + end do + + if(this%is_iorank()) call this%flush_file() ! flush the file to disk after each write + end subroutine + + + subroutine read_variables_raw(this, fileunit) + class(fesom_file_type), target :: this + integer, intent(in) :: fileunit + ! EO parameters + integer i + type(var_info), pointer :: var + integer status + + do i=1, this%nvar_infos + var => this%var_infos(i) + read(fileunit) var%external_local_data_ptr ! directly use external_local_data_ptr, use the local_data_copy only when called asynchronously + end do + end subroutine + + + subroutine write_variables_raw(this, fileunit) + class(fesom_file_type), target :: this + integer, intent(in) :: fileunit + ! EO parameters + integer i + type(var_info), pointer :: var + + do i=1, this%nvar_infos + var => this%var_infos(i) + write(fileunit) var%external_local_data_ptr ! directly use external_local_data_ptr, use the local_data_copy only when called asynchronously + end do + end subroutine + + + subroutine join(this) + class(fesom_file_type) this + ! 
EO parameters + + if(this%thread_running) call this%thread%join() + this%thread_running = .false. + end subroutine + + + subroutine async_read_and_scatter_variables(this) + class(fesom_file_type), target :: this + + call assert(.not. this%thread_running, __LINE__) + + this%gather_and_write = .false. + call this%thread%run() + this%thread_running = .true. + end subroutine + + + subroutine async_gather_and_write_variables(this) + class(fesom_file_type), target :: this + ! EO parameters + integer i + type(var_info), pointer :: var + + call assert(.not. this%thread_running, __LINE__) + + ! copy data so we can write the current values asynchronously + do i=1, this%nvar_infos + var => this%var_infos(i) + if(.not. allocated(var%local_data_copy)) allocate( var%local_data_copy(size(var%external_local_data_ptr,dim=1), size(var%external_local_data_ptr,dim=2)) ) + var%local_data_copy = var%external_local_data_ptr + end do + + this%gather_and_write = .true. + call this%thread%run() + this%thread_running = .true. + end subroutine + + + subroutine async_worker(fesom_file_index) + integer, intent(in) :: fesom_file_index + ! EO parameters + type(fesom_file_type), pointer :: f + + f => all_fesom_files(fesom_file_index)%ptr + + if(f%gather_and_write) then + call f%gather_and_write_variables() + else + call f%read_and_scatter_variables() + end if + end subroutine + + + ! use separate procedures to specify node based or element based variables + ! if we would otherwise specify the vars only via the sizes of their dimensions, + ! we have to assign the corresponding dimindx somewhere else, which would be error prone + subroutine specify_node_var_2d(this, name, longname, units, local_data) + use, intrinsic :: ISO_C_BINDING + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision + ! 
EO parameters + real(8), pointer :: external_local_data_ptr(:,:) + type(dim_info) level_diminfo + + level_diminfo = obtain_diminfo(this, m_nod2d) + + external_local_data_ptr(1:1,1:size(local_data)) => local_data(:) + call specify_variable(this, name, [level_diminfo%idx, this%time_dimidx], level_diminfo%len, external_local_data_ptr, .false., longname, units) + end subroutine + + + subroutine specify_node_var_3d(this, name, longname, units, local_data) + use, intrinsic :: ISO_C_BINDING + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + ! EO parameters + type(dim_info) level_diminfo, depth_diminfo + + level_diminfo = obtain_diminfo(this, m_nod2d) + depth_diminfo = obtain_diminfo(this, size(local_data, dim=1)) + + call specify_variable(this, name, [level_diminfo%idx, depth_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .false., longname, units) + end subroutine + + + subroutine specify_elem_var_2d(this, name, longname, units, local_data) + use, intrinsic :: ISO_C_BINDING + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision + ! 
EO parameters + real(8), pointer :: external_local_data_ptr(:,:) + type(dim_info) level_diminfo + + level_diminfo = obtain_diminfo(this, m_elem2d) + + external_local_data_ptr(1:1,1:size(local_data)) => local_data(:) + call specify_variable(this, name, [level_diminfo%idx, this%time_dimidx], level_diminfo%len, external_local_data_ptr, .true., longname, units) + end subroutine + + + subroutine specify_elem_var_3d(this, name, longname, units, local_data) + use, intrinsic :: ISO_C_BINDING + class(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + ! EO parameters + type(dim_info) level_diminfo, depth_diminfo + + level_diminfo = obtain_diminfo(this, m_elem2d) + depth_diminfo = obtain_diminfo(this, size(local_data, dim=1)) + + call specify_variable(this, name, [level_diminfo%idx, depth_diminfo%idx, this%time_dimidx], level_diminfo%len, local_data, .true., longname, units) + end subroutine + + + function obtain_diminfo(this, len) result(info) + type(fesom_file_type), intent(inout) :: this + type(dim_info) info + integer len + ! EO parameters + integer i + type(dim_info), allocatable :: tmparr(:) + + do i=1, size(this%used_mesh_dims) + if(this%used_mesh_dims(i)%len == len) then + info = this%used_mesh_dims(i) + return + end if + end do + + ! the dim has not been added yet, see if it is one of our allowed mesh related dims + if(len == m_nod2d) then + info = dim_info( idx=this%add_dim('node', len), len=len) + else if(len == m_elem2d) then + info = dim_info( idx=this%add_dim('elem', len), len=len) + else if(len == m_nl-1) then + info = dim_info( idx=this%add_dim('nz_1', len), len=len) + else if(len == m_nl) then + info = dim_info( idx=this%add_dim('nz', len), len=len) + else + print *, "error in line ",__LINE__, __FILE__," can not find dimension with size",len + stop 1 + end if + + ! 
append the new dim to our list of used dims, i.e. the dims we use for the mesh based variables created via #specify_variable + ! assume the used_mesh_dims array is allocated + allocate( tmparr(size(this%used_mesh_dims)+1) ) + tmparr(1:size(this%used_mesh_dims)) = this%used_mesh_dims + deallocate(this%used_mesh_dims) + call move_alloc(tmparr, this%used_mesh_dims) + this%used_mesh_dims( size(this%used_mesh_dims) ) = info + end function + + + subroutine specify_variable(this, name, dim_indices, global_level_data_size, local_data, is_elem_based, longname, units) + type(fesom_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: dim_indices(:) + integer global_level_data_size + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision? + logical, intent(in) :: is_elem_based + character(len=*), intent(in) :: units, longname + ! EO parameters + integer var_index + + var_index = this%add_var_double(name, dim_indices) + call this%add_var_att(var_index, "units", units) + call this%add_var_att(var_index, "long_name", longname) + + call assert(this%nvar_infos < size(this%var_infos), __LINE__) + this%nvar_infos = this%nvar_infos+1 + this%var_infos(this%nvar_infos)%var_index = var_index + this%var_infos(this%nvar_infos)%external_local_data_ptr => local_data + this%var_infos(this%nvar_infos)%global_level_data_size = global_level_data_size + this%var_infos(this%nvar_infos)%is_elem_based = is_elem_based + this%var_infos(this%nvar_infos)%varname = name + end subroutine + + + subroutine close_file(this) + class(fesom_file_type), intent(inout) :: this + + if(this%thread_running) call this%thread%join() + this%thread_running = .false. + + this%rec_cnt = -1 ! reset state (should probably be done in all the open_ procedures, not here) + call this%netcdf_file_type%close_file() + end subroutine + + + subroutine assert(val, line) + logical, intent(in) :: val + integer, intent(in) :: line + ! EO parameters + if(.not. 
val) then + print *, "error in line ",line, __FILE__ + stop 1 + end if + end subroutine + + + subroutine assert_nc(status, line) + integer, intent(in) :: status + integer, intent(in) :: line + ! EO parameters + include "netcdf.inc" + if(status /= nf_noerr) then + print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status)) + stop 1 + endif + end subroutine + +end module diff --git a/src/io_gather.F90 b/src/io_gather.F90 index 5aa8f68fb..46079bf92 100644 --- a/src/io_gather.F90 +++ b/src/io_gather.F90 @@ -1,4 +1,6 @@ module io_gather_module + USE MOD_PARTIT + USE MOD_PARSUP implicit none public init_io_gather, gather_nod2D, gather_real4_nod2D, gather_elem2D, gather_real4_elem2D private @@ -14,26 +16,31 @@ module io_gather_module contains - subroutine init_io_gather() - integer err + subroutine init_io_gather(partit) + type(t_partit), intent(inout), target :: partit + ! EO parameters - if(.not. nod2D_lists_initialized) call init_nod2D_lists() - if(.not. elem2D_lists_initialized) call init_elem2D_lists() + if(.not. nod2D_lists_initialized) call init_nod2D_lists(partit) + if(.not. elem2D_lists_initialized) call init_elem2D_lists(partit) end subroutine - subroutine init_nod2D_lists() - use g_PARSUP + subroutine init_nod2D_lists(partit) implicit none + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_part_ass.h" ! EO args ! todo: initialize with the other comm arrays, probably in "init_gatherLists" subroutine if(mype /= 0) then - if(.not. allocated(remPtr_nod2D)) allocate(remPtr_nod2D(npes)) + if(.not. allocated(partit%remPtr_nod2D)) allocate(partit%remPtr_nod2D(npes)) +#include "associate_part_ass.h" end if call MPI_Bcast(remPtr_nod2D, size(remPtr_nod2D), MPI_INTEGER, 0, MPI_COMM_FESOM, MPIerr) if(mype /= 0) then - if(.not. allocated(remList_nod2D)) allocate(remList_nod2D(remPtr_nod2D(npes))) + if(.not. 
allocated(partit%remList_nod2D)) allocate(partit%remList_nod2D(remPtr_nod2D(npes))) +#include "associate_part_ass.h" end if call MPI_Bcast(remList_nod2D, size(remList_nod2D), MPI_INTEGER, 0, MPI_COMM_FESOM, MPIerr) @@ -52,18 +59,24 @@ subroutine init_nod2D_lists() end subroutine - subroutine init_elem2D_lists() - use g_PARSUP + subroutine init_elem2D_lists(partit) + USE MOD_PARTIT + USE MOD_PARSUP implicit none + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_part_ass.h" ! EO args ! todo: initialize with the other comm arrays, probably in "init_gatherLists" subroutine if(mype /= 0) then - if(.not. allocated(remPtr_elem2D)) allocate(remPtr_elem2D(npes)) + if(.not. allocated(partit%remPtr_elem2D)) allocate(partit%remPtr_elem2D(npes)) +#include "associate_part_ass.h" end if call MPI_Bcast(remPtr_elem2D, size(remPtr_elem2D), MPI_INTEGER, 0, MPI_COMM_FESOM, MPIerr) if(mype /= 0) then - if(.not. allocated(remList_elem2D)) allocate(remList_elem2D(remPtr_elem2D(npes))) + if(.not. allocated(partit%remList_elem2D)) allocate(partit%remList_elem2D(remPtr_elem2D(npes))) +#include "associate_part_ass.h" end if call MPI_Bcast(remList_elem2D, size(remList_elem2D), MPI_INTEGER, 0, MPI_COMM_FESOM, MPIerr) @@ -83,10 +96,12 @@ subroutine init_elem2D_lists() ! thread-safe procedure - subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) - use g_PARSUP + subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) + USE MOD_PARTIT + USE MOD_PARSUP use, intrinsic :: iso_fortran_env, only: real64 implicit none + type(t_partit), intent(inout), target :: partit real(real64), intent(in) :: arr2D(:) real(real64), intent(out) :: arr2D_global(:) integer, intent(in) :: root_rank ! rank of receiving process @@ -97,9 +112,11 @@ subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: remote_node_count = -1 real(real64), allocatable :: sendbuf(:) real(real64), allocatable :: recvbuf(:) ! 
todo: alloc only for root_rank - integer :: req(npes-1) + integer :: req(partit%npes-1) integer :: request_index integer :: mpi_precision = MPI_DOUBLE_PRECISION +#include "associate_part_def.h" +#include "associate_part_ass.h" if(.not. nod2D_lists_initialized) stop "io_gather_module has not been initialized" @@ -108,10 +125,12 @@ subroutine gather_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) ! thread-safe procedure - subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) - use g_PARSUP + subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) + USE MOD_PARTIT + USE MOD_PARSUP use, intrinsic :: iso_fortran_env, only: real32 implicit none + type(t_partit), intent(inout), target :: partit real(real32), intent(in) :: arr2D(:) real(real32), intent(out) :: arr2D_global(:) integer, intent(in) :: root_rank ! rank of receiving process @@ -122,9 +141,11 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: remote_node_count = -1 real(real32), allocatable :: sendbuf(:) real(real32), allocatable :: recvbuf(:) ! todo: alloc only for root_rank - integer :: req(npes-1) + integer :: req(partit%npes-1) integer :: request_index integer :: mpi_precision = MPI_REAL +#include "associate_part_def.h" +#include "associate_part_ass.h" if(.not. nod2D_lists_initialized) stop "io_gather_module has not been initialized" @@ -133,10 +154,12 @@ subroutine gather_real4_nod2D(arr2D, arr2D_global, root_rank, tag, io_comm) ! thread-safe procedure - subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) - use g_PARSUP + subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) + USE MOD_PARTIT + USE MOD_PARSUP use, intrinsic :: iso_fortran_env, only: real64 implicit none + type(t_partit), intent(inout), target :: partit real(real64), intent(in) :: arr2D(:) real(real64), intent(out) :: arr2D_global(:) integer, intent(in) :: root_rank ! 
rank of receiving process @@ -147,9 +170,11 @@ subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: remote_elem_count = -1 real(real64), allocatable :: sendbuf(:) real(real64), allocatable :: recvbuf(:) - integer :: req(npes-1) + integer :: req(partit%npes-1) integer :: request_index integer :: mpi_precision = MPI_DOUBLE_PRECISION +#include "associate_part_def.h" +#include "associate_part_ass.h" if(.not. elem2D_lists_initialized) stop "io_gather_module has not been initialized" @@ -158,10 +183,12 @@ subroutine gather_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) ! thread-safe procedure - subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) - use g_PARSUP + subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm, partit) + USE MOD_PARTIT + USE MOD_PARSUP use, intrinsic :: iso_fortran_env, only: real32 implicit none + type(t_partit), intent(inout), target :: partit real(real32), intent(in) :: arr2D(:) real(real32), intent(out) :: arr2D_global(:) integer, intent(in) :: root_rank ! rank of receiving process @@ -172,9 +199,11 @@ subroutine gather_real4_elem2D(arr2D, arr2D_global, root_rank, tag, io_comm) integer :: remote_elem_count = -1 real(real32), allocatable :: sendbuf(:) real(real32), allocatable :: recvbuf(:) - integer :: req(npes-1) + integer :: req(partit%npes-1) integer :: request_index integer :: mpi_precision = MPI_REAL +#include "associate_part_def.h" +#include "associate_part_ass.h" if(.not. 
elem2D_lists_initialized) stop "io_gather_module has not been initialized" diff --git a/src/io_meandata.F90 b/src/io_meandata.F90 index 23a827d8b..055a33c5c 100644 --- a/src/io_meandata.F90 +++ b/src/io_meandata.F90 @@ -1,5 +1,6 @@ module io_MEANDATA - + USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM, only : WP use, intrinsic :: iso_fortran_env, only: real64, real32 use io_data_strategy_module @@ -16,6 +17,7 @@ module io_MEANDATA type Meandata private + type(t_partit), pointer :: p_partit integer :: ndim integer :: glsize(2) integer :: accuracy @@ -60,7 +62,8 @@ module io_MEANDATA ! !-------------------------------------------------------------------------------------------- ! - integer, save :: io_listsize=0 + integer, save :: io_listsize =0 + logical, save :: vec_autorotate=.FALSE. type io_entry CHARACTER(len=15) :: id ='unknown ' INTEGER :: freq =0 @@ -91,26 +94,37 @@ subroutine destructor(this) end subroutine -subroutine ini_mean_io(mesh) +subroutine ini_mean_io(ice, dynamics, tracers, partit, mesh) + use MOD_MESH + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + USE MOD_ICE use g_cvmix_tke use g_cvmix_idemix use g_cvmix_kpp use g_cvmix_tidal - use g_PARSUP use diagnostics - use i_PARAM, only: whichEVP implicit none integer :: i, j integer, save :: nm_io_unit = 103 ! 
unit to open namelist file, skip 100-102 for cray integer :: iost - integer,dimension(12) :: sel_forcvar=0 + integer,dimension(15) :: sel_forcvar=0 character(len=10) :: id_string type(t_mesh), intent(in) , target :: mesh - namelist /nml_listsize/ io_listsize + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice + namelist /nml_general / io_listsize, vec_autorotate namelist /nml_list / io_list -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! OPEN and read namelist for I/O open( unit=nm_io_unit, file='namelist.io', form='formatted', access='sequential', status='old', iostat=iost ) @@ -118,10 +132,10 @@ subroutine ini_mean_io(mesh) if (mype==0) WRITE(*,*) ' file : ', 'namelist.io',' open ok' else if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.io',' ; iostat=',iost - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif - READ(nm_io_unit, nml=nml_listsize, iostat=iost ) + READ(nm_io_unit, nml=nml_general, iostat=iost ) allocate(io_list(io_listsize)) READ(nm_io_unit, nml=nml_list, iostat=iost ) close(nm_io_unit ) @@ -139,243 +153,258 @@ subroutine ini_mean_io(mesh) SELECT CASE (trim(io_list(i)%id)) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!2D streams!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
CASE ('sst ') - call def_stream(nod2D, myDim_nod2D, 'sst', 'sea surface temperature', 'C', tr_arr(1,1:myDim_nod2D,1), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'sst', 'sea surface temperature', 'C', tracers%data(1)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('sss ') - call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tr_arr(1,1:myDim_nod2D,2), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'sss', 'sea surface salinity', 'psu', tracers%data(2)%values(1,1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ssh ') - call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'ssh', 'sea surface elevation', 'm', dynamics%eta_n, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('vve_5 ') - call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', Wvel(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) - + call def_stream(nod2D, myDim_nod2D, 'vve_5', 'vertical velocity at 5th level', 'm/s', dynamics%w(5,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + +CASE ('ssh_rhs ') + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs', 'ssh rhs', '?', dynamics%ssh_rhs, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) +CASE ('ssh_rhs_old ') + call def_stream(nod2D, myDim_nod2D, 'ssh_rhs_old', 'ssh rhs', '?', dynamics%ssh_rhs_old, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + !___________________________________________________________________________________________________________________________________ ! 
output sea ice CASE ('uice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'uice', 'ice velocity x', 'm/s', u_ice, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'uice', 'ice velocity x', 'm/s', ice%uice(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('vice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'vice', 'ice velocity y', 'm/s', v_ice, io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'vice', 'ice velocity y', 'm/s', ice%vice(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('a_ice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'a_ice', 'ice concentration', '%', a_ice(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'a_ice', 'ice concentration', '%', ice%data(1)%values(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('m_ice ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'm_ice', 'ice height', 'm', m_ice(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'm_ice', 'ice height', 'm', ice%data(2)%values(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('thdgr ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'thdgr', 'growth rate ice', 'm/s', thdgr(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'thdgr', 'thermodynamic growth rate ice', 'm/s', ice%thermo%thdgr(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('thdgrsn ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'thdgrsn', 'growth rate ice', 'm/s', thdgrsn(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, 
mesh) + call def_stream(nod2D, myDim_nod2D, 'thdgrsn', 'thermodynamic growth rate snow', 'm/s', ice%thermo%thdgrsn(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + end if +CASE ('flice ') + if (use_ice) then + call def_stream(nod2D, myDim_nod2D, 'flice', 'flooding growth rate ice', 'm/s', flice(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('m_snow ') if (use_ice) then - call def_stream(nod2D, myDim_nod2D, 'm_snow', 'snow height', 'm', m_snow(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'm_snow', 'snow height', 'm', ice%data(3)%values(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ ! output mixed layer depth CASE ('MLD1 ') - call def_stream(nod2D, myDim_nod2D, 'MLD1', 'Mixed Layer Depth', 'm', MLD1(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'MLD1', 'Mixed Layer Depth', 'm', MLD1(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('MLD2 ') - call def_stream(nod2D, myDim_nod2D, 'MLD2', 'Mixed Layer Depth', 'm', MLD2(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'MLD2', 'Mixed Layer Depth', 'm', MLD2(1:myDim_nod2D), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) !___________________________________________________________________________________________________________________________________ ! 
output surface forcing CASE ('fh ') - call def_stream(nod2D, myDim_nod2D, 'fh', 'heat flux', 'W', heat_flux_in(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'fh', 'heat flux', 'W', heat_flux_in(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('fw ') - call def_stream(nod2D, myDim_nod2D, 'fw', 'fresh water flux', 'm/s', water_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'fw', 'fresh water flux', 'm/s', water_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmice_x ') - call def_stream(nod2D, myDim_nod2D, 'atmice_x', 'stress atmice x', 'N/m2', stress_atmice_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'atmice_x', 'stress atmice x', 'N/m2', ice%stress_atmice_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmice_y ') - call def_stream(nod2D, myDim_nod2D, 'atmice_y', 'stress atmice y', 'N/m2', stress_atmice_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'atmice_y', 'stress atmice y', 'N/m2', ice%stress_atmice_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmoce_x ') - call def_stream(nod2D, myDim_nod2D, 'atmoce_x', 'stress atmoce x', 'N/m2', stress_atmoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'atmoce_x', 'stress atmoce x', 'N/m2', stress_atmoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('atmoce_y ') - call def_stream(nod2D, myDim_nod2D, 'atmoce_y', 'stress atmoce y', 'N/m2', stress_atmoce_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'atmoce_y', 'stress atmoce y', 'N/m2', stress_atmoce_y(:), io_list(i)%freq, io_list(i)%unit, 
io_list(i)%precision, partit, mesh) CASE ('iceoce_x ') - call def_stream(nod2D, myDim_nod2D, 'iceoce_x', 'stress iceoce x', 'N/m2', stress_iceoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'iceoce_x', 'stress iceoce x', 'N/m2', ice%stress_iceoce_x(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('iceoce_y ') - call def_stream(nod2D, myDim_nod2D, 'iceoce_y', 'stress iceoce y', 'N/m2', stress_iceoce_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'iceoce_y', 'stress iceoce y', 'N/m2', ice%stress_iceoce_y(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('alpha ') - call def_stream(nod2D, myDim_nod2D, 'alpha', 'thermal expansion', 'none', sw_alpha(1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'alpha', 'thermal expansion', 'none', sw_alpha(1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('beta ') - call def_stream(nod2D, myDim_nod2D, 'beta', 'saline contraction', 'none', sw_beta (1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'beta', 'saline contraction', 'none', sw_beta (1,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) +CASE ('dens_flux ') + call def_stream(nod2D, myDim_nod2D , 'dflux', 'density flux', 'kg/(m3*s)', dens_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('runoff ') sel_forcvar(10)= 1 - call def_stream(nod2D, myDim_nod2D, 'runoff', 'river runoff', 'none', runoff(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'runoff', 'river runoff', 'none', runoff(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('evap ') sel_forcvar(7) = 1 - call def_stream(nod2D, myDim_nod2D, 'evap', 
'evaporation', 'm/s', evaporation(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'evap', 'evaporation', 'm/s', evaporation(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('prec ') sel_forcvar(5) = 1 - call def_stream(nod2D, myDim_nod2D, 'prec', 'precicipation rain', 'm/s', prec_rain(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'prec', 'precicipation rain', 'm/s', prec_rain(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('snow ') sel_forcvar(6) = 1 - call def_stream(nod2D, myDim_nod2D, 'snow', 'precicipation snow', 'm/s', prec_snow(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'snow', 'precicipation snow', 'm/s', prec_snow(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('tair ') sel_forcvar(3) = 1 - call def_stream(nod2D, myDim_nod2D, 'tair', 'surface air temperature', '°C', Tair(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'tair', 'surface air temperature', '°C', Tair(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('shum ') sel_forcvar(4) = 1 - call def_stream(nod2D, myDim_nod2D, 'shum', 'specific humidity', '', shum(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'shum', 'specific humidity', '', shum(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('swr ') sel_forcvar(8) = 1 - call def_stream(nod2D, myDim_nod2D, 'swr', 'short wave radiation', 'W/m^2', shortwave(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'swr', 'short wave radiation', 'W/m^2', shortwave(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('lwr ') sel_forcvar(9) = 1 - call 
def_stream(nod2D, myDim_nod2D, 'lwr', 'long wave radiation', 'W/m^2', longwave(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'lwr', 'long wave radiation', 'W/m^2', longwave(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('uwind ') sel_forcvar(1) = 1 - call def_stream(nod2D, myDim_nod2D, 'uwind', '10m zonal surface wind velocity', 'm/s', u_wind(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'uwind', '10m zonal surface wind velocity', 'm/s', u_wind(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('vwind ') sel_forcvar(2) = 1 - call def_stream(nod2D, myDim_nod2D, 'vwind', '10m merid. surface wind velocity','m/s', v_wind(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'vwind', '10m merid. surface wind velocity','m/s', v_wind(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) !___________________________________________________________________________________________________________________________________ ! output KPP vertical mixing schemes CASE ('kpp_obldepth ') if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then! fesom KPP - call def_stream(nod2D, myDim_nod2D, 'kpp_obldepth', 'KPP ocean boundary layer depth', 'm', hbl(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'kpp_obldepth', 'KPP ocean boundary layer depth', 'm', hbl(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then ! 
cvmix KPP - call def_stream(nod2D, myDim_nod2D, 'kpp_obldepth', 'KPP ocean boundary layer depth', 'm', kpp_obldepth(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'kpp_obldepth', 'KPP ocean boundary layer depth', 'm', kpp_obldepth(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('kpp_sbuoyflx') if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then ! fesom KPP - call def_stream(nod2D, myDim_nod2D, 'kpp_sbuoyflx', 'surface buoyancy flux', 'm2/s3', Bo(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'kpp_sbuoyflx', 'surface buoyancy flux', 'm2/s3', Bo(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then ! cvmix KPP - call def_stream(nod2D, myDim_nod2D, 'kpp_sbuoyflx', 'surface buoyancy flux', 'm2/s3', kpp_sbuoyflx(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'kpp_sbuoyflx', 'surface buoyancy flux', 'm2/s3', kpp_sbuoyflx(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('tx_sur ') sel_forcvar(11) = 1 - call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind str. to ocean', 'm/s2', stress_surf(1, :), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind str. to ocean', 'm/s2', stress_surf(1, :), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ty_sur ') sel_forcvar(12) = 1 - call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind str. to ocean', 'm/s2', stress_surf(2, :), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind str. 
to ocean', 'm/s2', stress_surf(2, :), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('curl_surf ') if (lcurt_stress_surf) then - call def_stream(nod2D, myDim_nod2D, 'curl_surf', 'vorticity of the surface stress','none', curl_stress_surf(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) - end if + call def_stream(nod2D, myDim_nod2D, 'curl_surf', 'vorticity of the surface stress','none', curl_stress_surf(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) + end if !___________________________________________________________________________________________________________________________________ ! output Ferrari/GM parameterisation 2D CASE ('fer_C ') if (Fer_GM) then - call def_stream(nod2D, myDim_nod2D, 'fer_C', 'GM, depth independent speed', 'm/s' , fer_c(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'fer_C', 'GM, depth independent speed', 'm/s' , fer_c(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ !>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 3D streams <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< !___________________________________________________________________________________________________________________________________ CASE ('temp ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'temp', 'temperature', 'C', tr_arr(:,:,1), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'temp', 'temperature', 'C', tracers%data(1)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('salt ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'salt', 'salinity', 'psu', tr_arr(:,:,2), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, 
mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'salt', 'salinity', 'psu', tracers%data(2)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('otracers ') - do j=3, num_tracers - write (id_string, "(I3.3)") tracer_id(j) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'tra_'//id_string, 'pasive tracer ID='//id_string, 'n/a', tr_arr(:,:,j), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + do j=3, tracers%num_tracers + write (id_string, "(I3.3)") tracers%data(j)%ID + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'tra_'//id_string, 'pasive tracer ID='//id_string, 'n/a', tracers%data(j)%values(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end do CASE ('slope_x ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_x', 'neutral slope X', 'none', slope_tapered(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_x', 'neutral slope X', 'none', slope_tapered(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('slope_y ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_y', 'neutral slope Y', 'none', slope_tapered(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_y', 'neutral slope Y', 'none', slope_tapered(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('slope_z ') - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_z', 'neutral slope Z', 'none', slope_tapered(3,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'slope_z', 'neutral slope Z', 'none', slope_tapered(3,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('N2 ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'N2', 
'brunt väisälä', '1/s2', bvfreq(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'N2', 'brunt väisälä', '1/s2', bvfreq(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Kv ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'Kv', 'vertical diffusivity Kv', 'm2/s', Kv(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u', 'horizontal velocity','m/s', dynamics%uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('v ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v', 'meridional velocity','m/s', dynamics%uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) +CASE ('unod ') + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/),'unod', 'horizontal velocity at nodes', 'm/s', dynamics%uvnode(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) +CASE ('vnod ') + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/),'vnod', 'meridional velocity at nodes', 'm/s', dynamics%uvnode(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('w ') - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'w', 'vertical velocity', 'm/s', Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl, nod2D/), (/nl, 
myDim_nod2D/), 'w', 'vertical velocity', 'm/s', dynamics%w(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('Av ') - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'vertical viscosity Av', 'm2/s', Av(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('u_dis_tend') - if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_dis_tend', 'horizontal velocity viscosity tendency', 'm/s', UV_dis_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + if(dynamics%opt_visc==8) then + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_dis_tend', 'horizontal velocity viscosity tendency', 'm/s', UV_dis_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_dis_tend') - if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_dis_tend', 'meridional velocity viscosity tendency', 'm/s', UV_dis_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + if(dynamics%opt_visc==8) then + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_dis_tend', 'meridional velocity viscosity tendency', 'm/s', UV_dis_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('u_back_tend') - if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_back_tend', 'horizontal velocity backscatter tendency', 'm2/s2', UV_back_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + if(dynamics%opt_visc==8) then + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_back_tend', 'horizontal velocity backscatter tendency', 'm2/s2', UV_back_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, 
mesh) end if CASE ('v_back_tend') - if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_back_tend', 'meridional velocity backscatter tendency', 'm2/s2', UV_back_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + if(dynamics%opt_visc==8) then + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_back_tend', 'meridional velocity backscatter tendency', 'm2/s2', UV_back_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('u_total_tend') - if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_total_tend', 'horizontal velocity total viscosity tendency', 'm/s', UV_total_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + if(dynamics%opt_visc==8) then + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'u_total_tend', 'horizontal velocity total viscosity tendency', 'm/s', UV_total_tend(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('v_total_tend') - if(visc_option==8) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_total_tend', 'meridional velocity total viscosity tendency', 'm/s', UV_total_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + if(dynamics%opt_visc==8) then + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'v_total_tend', 'meridional velocity total viscosity tendency', 'm/s', UV_total_tend(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if !___________________________________________________________________________________________________________________________________ ! 
output Ferrari/GM parameterisation CASE ('bolus_u ') if (Fer_GM) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_u', 'GM bolus velocity U','m/s', fer_uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_u', 'GM bolus velocity U','m/s', dynamics%fer_uv(1,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('bolus_v ') if (Fer_GM) then - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_v', 'GM bolus velocity V','m/s', fer_uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'bolus_v', 'GM bolus velocity V','m/s', dynamics%fer_uv(2,:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('bolus_w ') if (Fer_GM) then - call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'bolus_w', 'GM bolus velocity W','m/s', fer_Wvel(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'bolus_w', 'GM bolus velocity W','m/s', dynamics%fer_w(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('fer_K ') if (Fer_GM) then - call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'fer_K', 'GM, stirring diff.','m2/s', fer_k(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl , nod2D /), (/nl, myDim_nod2D /), 'fer_K', 'GM, stirring diff.','m2/s', fer_k(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) end if CASE ('fer_scal ') if (Fer_GM) then - call def_stream( nod2D , myDim_nod2D , 'fer_scal', 'GM surface scaling','', fer_scal(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream( nod2D , myDim_nod2D , 'fer_scal', 'GM surface scaling','', fer_scal(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, 
mesh) end if CASE ('dMOC ') if (ldiag_dMOC) then - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'U_rho_x_DZ', 'fluxes for density MOC', 'fluxes', std_dens_UVDZ(1,:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'V_rho_x_DZ', 'fluxes for density MOC', 'fluxes', std_dens_UVDZ(2,:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_heat_flux', 'HF bouyancy flux ', 'kg*m/s' ,std_dens_flux(1,:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_rest_flux', 'RESTOR. bouyancy flux ', 'kg*m/s' ,std_dens_flux(2,:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_frwt_flux', 'FW bouyancy flux ', 'kg*m/s' ,std_dens_flux(3,:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_dens_dVdT', 'dV/dT', 'm3/s' ,std_dens_dVdT(:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, nod2D /), (/std_dens_N, myDim_nod2D/), 'std_dens_DIV', 'm3/s', 'm3/s' ,std_dens_DIV(:,:), 1, 'y', i_real4, mesh) - call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_dens_Z', 'm', 'm' ,std_dens_Z(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl-1, nod2D /), (/nl-1, myDim_nod2D /), 'density_dMOC', 'density' , 'm', density_dmoc(:,:), 1, 'y', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D , 'density_flux', 'density' , 'm', dens_flux(:), 1, 'y', i_real4, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'U_rho_x_DZ', 'fluxes for density MOC', 'fluxes', std_dens_UVDZ(1,:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'V_rho_x_DZ', 'fluxes for density MOC', 'fluxes', std_dens_UVDZ(2,:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_heat_flux', 
'HF bouyancy flux ', 'kg*m/s' ,std_dens_flux(1,:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_rest_flux', 'RESTOR. bouyancy flux ', 'kg*m/s' ,std_dens_flux(2,:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_frwt_flux', 'FW bouyancy flux ', 'kg*m/s' ,std_dens_flux(3,:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_dens_dVdT', 'dV/dT', 'm3/s' ,std_dens_dVdT(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, nod2D /), (/std_dens_N, myDim_nod2D/), 'std_dens_DIV', 'm3/s', 'm3/s' ,std_dens_DIV(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/std_dens_N, elem2D/), (/std_dens_N, myDim_elem2D/), 'std_dens_Z', 'm', 'm' ,std_dens_Z(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl-1, nod2D /), (/nl-1, myDim_nod2D /), 'density_dMOC', 'density' , 'm', density_dmoc(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D , 'density_flux_e', 'density flux at elems ', 'm', dens_flux_e(:), 1, 'y', i_real4, partit, mesh) end if !___________________________________________________________________________________________________________________________________ CASE ('pgf_x ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'pgf_x', 'zonal pressure gradient force' , 'm/s^2', pgf_x(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'pgf_x', 'zonal pressure gradient force' , 'm/s^2', pgf_x(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('pgf_y ') - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'pgf_y', 'meridional pressure gradient force', 'm/s^2', pgf_y(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'pgf_y', 'meridional 
pressure gradient force', 'm/s^2', pgf_y(:,:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) !___________________________________________________________________________________________________________________________________ #if defined (__oifs) CASE ('alb ') - call def_stream(nod2D, myDim_nod2D, 'alb', 'ice albedo', 'none', ice_alb(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'alb', 'ice albedo', 'none', ice%atmcoupl%ice_alb(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('ist ') - call def_stream(nod2D, myDim_nod2D, 'ist', 'ice surface temperature', 'K', ice_temp(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'ist', 'ice surface temperature', 'K', ice%data(4)%values(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('qsi ') - call def_stream(nod2D, myDim_nod2D, 'qsi', 'ice heat flux', 'W/m^2', ice_heat_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'qsi', 'ice heat flux', 'W/m^2', ice%atmcoupl%ice_flx_h(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) CASE ('qso ') - call def_stream(nod2D, myDim_nod2D, 'qso', 'oce heat flux', 'W/m^2', oce_heat_flux(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, mesh) + call def_stream(nod2D, myDim_nod2D, 'qso', 'oce heat flux', 'W/m^2', ice%atmcoupl%oce_flx_h(:), io_list(i)%freq, io_list(i)%unit, io_list(i)%precision, partit, mesh) #endif !___________________________________________________________________________________________________________________________________ @@ -388,131 +417,132 @@ subroutine ini_mean_io(mesh) !3D if (ldiag_energy) then - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'rhof', 'in-situ density at faces', 'kg/m3', rhof(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 
'wrhof', 'vertical velocity x density', 'kg/(s*m2)', wrhof(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'uu', 'u times u', 'm2/s2', u_x_u(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'uv', 'u times v', 'm2/s2', u_x_v(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vv', 'v times v', 'm2/s2', v_x_v(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, elem2D/), (/nl-1, myDim_elem2D/),'uw', 'u times w', 'm2/s2', u_x_w(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, elem2D/), (/nl-1, myDim_elem2D/),'vw', 'v times w', 'm2/s2', v_x_w(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dudx', 'du/dx', '1/s', dudx(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dudy', 'du/dy', '1/s', dudy(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvdx', 'dv/dx', '1/s', dvdx(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvdy', 'dv/dy', '1/s', dvdy(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'dudz', 'du/dz', '1/s', dudz(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'dvdz', 'dv/dz', '1/s', dvdz(:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz', 'int(Av * du/dz)', 'm3/s2', av_dudz(:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dvdz', 'int(Av * dv/dz)', 'm3/s2', av_dvdz(:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz_sq', 'Av * (du/dz)^2', 'm^2/s^3', av_dudz_sq(:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'Vertical mixing A', 'm2/s', Av(:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'unod', 'horizontal velocity at 
nodes', 'm/s', Unode(1,:,:), 1, 'm', i_real8, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vnod', 'meridional velocity at nodes', 'm/s', Unode(2,:,:), 1, 'm', i_real8, mesh) - - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', uv(1,:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', uv(2,:,:), 1, 'm', i_real4, mesh) - call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', Wvel(:,:), 1, 'm', i_real8, mesh) - - call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'utau_bott', '(u, tau) at the bottom', 'N/(m s)', utau_bott(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'u_bott', 'bottom velocity', 'm/s', u_bott(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'v_bott', 'bottom velocity', 'm/s', v_bott(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'u_surf', 'surface velocity', 'm/s', u_surf(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'v_surf', 'surface velocity', 'm/s', u_surf(1:myDim_elem2D), 1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'tx_bot', 'bottom stress x', 'N/m2', stress_bott(1, 1:myDim_elem2D),1, 'm', i_real4, mesh) - call def_stream(elem2D, myDim_elem2D, 'ty_bot', 'bottom stress y', 'N/m2', stress_bott(2, 1:myDim_elem2D),1, 'm', i_real4, mesh) - if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind stress to ocean', 'm/s2', stress_surf(1, 1:myDim_elem2D),1, 'm', i_real4, mesh) ; sel_forcvar(11)=1 - if (sel_forcvar(12)==0) call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind stress to ocean','m/s2', stress_surf(2, 1:myDim_elem2D),1, 'm', i_real4, mesh) ; sel_forcvar(12)=1 + call 
def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'rhof', 'in-situ density at faces', 'kg/m3', rhof(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wrhof', 'vertical velocity x density', 'kg/(s*m2)', wrhof(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'uu', 'u times u', 'm2/s2', u_x_u(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'uv', 'u times v', 'm2/s2', u_x_v(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'vv', 'v times v', 'm2/s2', v_x_v(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, elem2D/), (/nl-1, myDim_elem2D/),'uw', 'u times w', 'm2/s2', u_x_w(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, elem2D/), (/nl-1, myDim_elem2D/),'vw', 'v times w', 'm2/s2', v_x_w(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dudx', 'du/dx', '1/s', dudx(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dudy', 'du/dy', '1/s', dudy(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvdx', 'dv/dx', '1/s', dvdx(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvdy', 'dv/dy', '1/s', dvdy(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'dudz', 'du/dz', '1/s', dudz(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'dvdz', 'dv/dz', '1/s', dvdz(:,:), 1, 'm', i_real8, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dudz', 'int(Av * du/dz)', 'm3/s2', av_dudz(:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'av_dvdz', 'int(Av * dv/dz)', 'm3/s2', av_dvdz(:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, 
myDim_elem2D/), 'av_dudz_sq', 'Av * (du/dz)^2', 'm^2/s^3', av_dudz_sq(:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl, elem2D/), (/nl, myDim_elem2D/), 'Av', 'Vertical mixing A', 'm2/s', Av(:,:), 1, 'm', i_real4, partit, mesh) + + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'um', 'horizontal velocity', 'm/s', dynamics%uv(1,:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, elem2D/), (/nl-1, myDim_elem2D/), 'vm', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl, nod2D/), (/nl, myDim_nod2D/), 'wm', 'vertical velocity', 'm/s', dynamics%w(:,:), 1, 'm', i_real8, partit, mesh) + + call def_stream(elem2D, myDim_elem2D, 'utau_surf', '(u, tau) at the surface', 'N/(m s)', utau_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'utau_bott', '(u, tau) at the bottom', 'N/(m s)', utau_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'u_bott', 'bottom velocity', 'm/s', u_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'v_bott', 'bottom velocity', 'm/s', v_bott(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'u_surf', 'surface velocity', 'm/s', u_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'v_surf', 'surface velocity', 'm/s', u_surf(1:myDim_elem2D), 1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'tx_bot', 'bottom stress x', 'N/m2', stress_bott(1, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) + call def_stream(elem2D, myDim_elem2D, 'ty_bot', 'bottom stress y', 'N/m2', stress_bott(2, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) + if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind stress to ocean', 'm/s2', stress_surf(1, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) ; sel_forcvar(11)=1 + if (sel_forcvar(12)==0) call 
def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind stress to ocean','m/s2', stress_surf(2, 1:myDim_elem2D),1, 'm', i_real4, partit, mesh) ; sel_forcvar(12)=1 end if if (mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then ! TKE diagnostic - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke' , 'turbulent kinetic energy' , 'm^2/s^2', tke(:,:) , 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Ttot', 'total production of turbulent kinetic energy', 'm^2/s^3', tke_Ttot(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tbpr', 'TKE production by buoyancy' , 'm^2/s^3', tke_Tbpr(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tspr', 'TKE production by shear' , 'm^2/s^3', tke_Tspr(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tdif', 'TKE production by vertical diffusion' , 'm^2/s^3', tke_Tdif(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tdis', 'TKE production by dissipation' , 'm^2/s^3', tke_Tdis(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Twin', 'TKE production by wind' , 'm^2/s^3', tke_Twin(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tbck', 'background forcing for TKE' , 'm^2/s^3', tke_Tbck(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Lmix', 'mixing length scale of TKE' , 'm' , tke_Lmix(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Pr' , 'Prantl number' , '' , tke_Pr(:,:) , 1, 'y', i_real4, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke' , 'turbulent kinetic energy' , 'm^2/s^2', tke(:,:) , 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Ttot', 'total production of turbulent kinetic energy', 'm^2/s^3', tke_Ttot(:,:), 1, 'y', i_real4, partit, mesh) + call 
def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tbpr', 'TKE production by buoyancy' , 'm^2/s^3', tke_Tbpr(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tspr', 'TKE production by shear' , 'm^2/s^3', tke_Tspr(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tdif', 'TKE production by vertical diffusion' , 'm^2/s^3', tke_Tdif(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tdis', 'TKE production by dissipation' , 'm^2/s^3', tke_Tdis(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Twin', 'TKE production by wind' , 'm^2/s^3', tke_Twin(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tbck', 'background forcing for TKE' , 'm^2/s^3', tke_Tbck(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Lmix', 'mixing length scale of TKE' , 'm' , tke_Lmix(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Pr' , 'Prantl number' , '' , tke_Pr(:,:) , 1, 'y', i_real4, partit, mesh) if (mix_scheme_nmb==56) then ! TKE-IDEMIX diagnostic - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tiwf', 'TKE production by internal waves (IDEMIX)', 'm^2/s^3', tke_Tiwf(:,:), 1, 'y', i_real4, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tke_Tiwf', 'TKE production by internal waves (IDEMIX)', 'm^2/s^3', tke_Tiwf(:,:), 1, 'y', i_real4, partit, mesh) end if end if if (mod(mix_scheme_nmb,10)==6) then ! 
IDEMIX Internal-Wave-Energy diagnostics - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe' , 'internal wave energy' , 'm^2/s^2', iwe(:,:) , 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Ttot', 'total production of internal wave energy', 'm^2/s^2', iwe_Ttot(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tdif', 'IWE production by vertical diffusion' , 'm^2/s^3', iwe_Tdif(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tdis', 'IWE production by dissipation' , 'm^2/s^3', iwe_Tdis(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tsur', 'IWE production from surface forcing' , 'm^2/s^2', iwe_Tsur(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tbot', 'IWE production from bottom forcing' , 'm^2/s^2', iwe_Tbot(:,:), 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_c0' , 'IWE vertical group velocity' , 'm/s' , iwe_c0(:,:) , 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_v0' , 'IWE horizontal group velocity' , 'm/s' , iwe_c0(:,:) , 1, 'y', i_real4, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe' , 'internal wave energy' , 'm^2/s^2', iwe(:,:) , 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Ttot', 'total production of internal wave energy', 'm^2/s^2', iwe_Ttot(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tdif', 'IWE production by vertical diffusion' , 'm^2/s^3', iwe_Tdif(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tdis', 'IWE production by dissipation' , 'm^2/s^3', iwe_Tdis(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tsur', 'IWE production from surface forcing' , 'm^2/s^2', iwe_Tsur(:,:), 1, 'y', i_real4, partit, mesh) + 
call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_Tbot', 'IWE production from bottom forcing' , 'm^2/s^2', iwe_Tbot(:,:), 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_c0' , 'IWE vertical group velocity' , 'm/s' , iwe_c0(:,:) , 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'iwe_v0' , 'IWE horizontal group velocity' , 'm/s' , iwe_c0(:,:) , 1, 'y', i_real4, partit, mesh) end if if (mod(mix_scheme_nmb,10)==7) then ! cvmix_TIDAL diagnostics - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tidal_Kv' , 'tidal diffusivity' , 'm^2/s' , tidal_Kv(:,:) , 1, 'y', i_real4, mesh) - call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tidal_Av' , 'tidal viscosity' , 'm^2/s' , tidal_Av(:,:) , 1, 'y', i_real4, mesh) - call def_stream( nod2D , myDim_nod2D , 'tidal_forcbot', 'near tidal bottom forcing', 'W/m^2' , tidal_forc_bottom_2D , 100, 'y', i_real4, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tidal_Kv' , 'tidal diffusivity' , 'm^2/s' , tidal_Kv(:,:) , 1, 'y', i_real4, partit, mesh) + call def_stream((/nl,nod2D/), (/nl,myDim_nod2D/), 'tidal_Av' , 'tidal viscosity' , 'm^2/s' , tidal_Av(:,:) , 1, 'y', i_real4, partit, mesh) + call def_stream( nod2D , myDim_nod2D , 'tidal_forcbot', 'near tidal bottom forcing', 'W/m^2' , tidal_forc_bottom_2D , 100, 'y', i_real4, partit, mesh) end if !___________________________________________________________________________________________________________________________________ ! output Redi parameterisation if (Redi) then - call def_stream((/nl-1 , nod2D /), (/nl-1, myDim_nod2D /), 'Redi_K', 'Redi diffusion coefficient', 'm2/s', Ki(:,:), 1, 'y', i_real4, mesh) + call def_stream((/nl-1 , nod2D /), (/nl-1, myDim_nod2D /), 'Redi_K', 'Redi diffusion coefficient', 'm2/s', Ki(:,:), 1, 'y', i_real4, partit, mesh) end if !___________________________________________________________________________________________________________________________________ ! 
output Monin-Obukov (TB04) mixing length if (use_momix) then - call def_stream(nod2D, myDim_nod2D, 'momix_length', 'Monin-Obukov mixing length', 'm', mixlength(:), 1, 'm', i_real4, mesh) + call def_stream(nod2D, myDim_nod2D, 'momix_length', 'Monin-Obukov mixing length', 'm', mixlength(:), 1, 'm', i_real4, partit, mesh) end if !___________________________________________________________________________________________________________________________________ if (ldiag_curl_vel3) then - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'curl_u', 'relative vorticity', '1/s', vorticity, 1, 'm', i_real4, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'curl_u', 'relative vorticity', '1/s', vorticity, 1, 'm', i_real4, partit, mesh) end if !___________________________________________________________________________________________________________________________________ - if (whichEVP==1) then + if (ice%whichEVP==1) then end if - if (whichEVP==2) then - call def_stream(elem2D, myDim_elem2D, 'alpha_EVP', 'alpha in EVP', 'n/a', alpha_evp_array, 1, 'd', i_real4, mesh) - call def_stream(nod2D, myDim_nod2D, 'beta_EVP', 'beta in EVP', 'n/a', beta_evp_array, 1, 'd', i_real4, mesh) + if (ice%whichEVP==2) then + call def_stream(elem2D, myDim_elem2D, 'alpha_EVP', 'alpha in EVP', 'n/a', ice%alpha_evp_array, 1, 'd', i_real4, partit, mesh) + call def_stream(nod2D, myDim_nod2D, 'beta_EVP', 'beta in EVP', 'n/a', ice%beta_evp_array, 1, 'd', i_real4, partit, mesh) end if !___________________________________________________________________________ if (ldiag_dvd) then - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_h', 'horiz. dvd of temperature', '°C/s' , tr_dvd_horiz(:,:,1), 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_v', 'vert. dvd of temperature' , '°C/s' , tr_dvd_vert(:,:,1) , 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_h', 'horiz. 
dvd of salinity' , 'psu/s', tr_dvd_horiz(:,:,2), 1, 'm', i_real4, mesh) - call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_v', 'vert. dvd of salinity' , 'psu/s', tr_dvd_vert(:,:,2) , 1, 'm', i_real4, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_h', 'horiz. dvd of temperature', '°C/s' , tracers%work%tr_dvd_horiz(:,:,1), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_temp_v', 'vert. dvd of temperature' , '°C/s' , tracers%work%tr_dvd_vert(:,:,1) , 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_h', 'horiz. dvd of salinity' , 'psu/s', tracers%work%tr_dvd_horiz(:,:,2), 1, 'm', i_real4, partit, mesh) + call def_stream((/nl-1, nod2D/), (/nl-1, myDim_nod2D/), 'dvd_salt_v', 'vert. dvd of salinity' , 'psu/s', tracers%work%tr_dvd_vert(:,:,2) , 1, 'm', i_real4, partit, mesh) end if !___________________________________________________________________________ if (ldiag_forc) then - if (sel_forcvar( 1)==0) call def_stream(nod2D , myDim_nod2D , 'uwind' , '10m zonal surface wind velocity', 'm/s' , u_wind(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 2)==0) call def_stream(nod2D , myDim_nod2D , 'vwind' , '10m merid surface wind velocity', 'm/s' , v_wind(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 3)==0) call def_stream(nod2D , myDim_nod2D , 'tair' , 'surface air temperature' , '°C' , Tair(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 4)==0) call def_stream(nod2D , myDim_nod2D , 'shum' , 'specific humidity' , '' , shum(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 5)==0) call def_stream(nod2D , myDim_nod2D , 'prec' , 'precicipation rain' , 'm/s' , prec_rain(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 6)==0) call def_stream(nod2D , myDim_nod2D , 'snow' , 'precicipation snow' , 'm/s' , prec_snow(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 7)==0) call def_stream(nod2D , myDim_nod2D , 'evap' , 'evaporation' , 'm/s' , evaporation(:) , 
1, 'm', i_real4, mesh) - if (sel_forcvar( 8)==0) call def_stream(nod2D , myDim_nod2D , 'swr' , 'short wave radiation' , 'W/m^2', shortwave(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar( 9)==0) call def_stream(nod2D , myDim_nod2D , 'lwr' , 'long wave radiation' , 'W/m^2', longwave(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar(10)==0) call def_stream(nod2D , myDim_nod2D , 'runoff', 'river runoff' , 'none' , runoff(:) , 1, 'm', i_real4, mesh) - if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind str. to ocean' , 'm/s^2', stress_surf(1, :), 1, 'm', i_real4, mesh) - if (sel_forcvar(12)==0) call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind str. to ocean' , 'm/s^2', stress_surf(2, :), 1, 'm', i_real4, mesh) - call def_stream(nod2D , myDim_nod2D , 'cd','wind drag coef. ' , '', cd_atm_oce_arr(:), 1, 'm', i_real4, mesh) - call def_stream(nod2D , myDim_nod2D , 'ch','transfer coeff. sensible heat', '', ch_atm_oce_arr(:), 1, 'm', i_real4, mesh) - call def_stream(nod2D , myDim_nod2D , 'ce','transfer coeff. 
evaporation ' , '', ce_atm_oce_arr(:), 1, 'm', i_real4, mesh) + if (sel_forcvar( 1)==0) call def_stream(nod2D , myDim_nod2D , 'uwind' , '10m zonal surface wind velocity', 'm/s' , u_wind(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 2)==0) call def_stream(nod2D , myDim_nod2D , 'vwind' , '10m merid surface wind velocity', 'm/s' , v_wind(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 3)==0) call def_stream(nod2D , myDim_nod2D , 'tair' , 'surface air temperature' , '°C' , Tair(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 4)==0) call def_stream(nod2D , myDim_nod2D , 'shum' , 'specific humidity' , '' , shum(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 5)==0) call def_stream(nod2D , myDim_nod2D , 'prec' , 'precicipation rain' , 'm/s' , prec_rain(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 6)==0) call def_stream(nod2D , myDim_nod2D , 'snow' , 'precicipation snow' , 'm/s' , prec_snow(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 7)==0) call def_stream(nod2D , myDim_nod2D , 'evap' , 'evaporation' , 'm/s' , evaporation(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 8)==0) call def_stream(nod2D , myDim_nod2D , 'swr' , 'short wave radiation' , 'W/m^2', shortwave(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar( 9)==0) call def_stream(nod2D , myDim_nod2D , 'lwr' , 'long wave radiation' , 'W/m^2', longwave(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar(10)==0) call def_stream(nod2D , myDim_nod2D , 'runoff', 'river runoff' , 'none' , runoff(:) , 1, 'm', i_real4, partit, mesh) + if (sel_forcvar(11)==0) call def_stream(elem2D, myDim_elem2D, 'tx_sur', 'zonal wind str. to ocean' , 'm/s^2', stress_surf(1, :), 1, 'm', i_real4, partit, mesh) + if (sel_forcvar(12)==0) call def_stream(elem2D, myDim_elem2D, 'ty_sur', 'meridional wind str. to ocean' , 'm/s^2', stress_surf(2, :), 1, 'm', i_real4, partit, mesh) + call def_stream(nod2D , myDim_nod2D , 'cd', 'wind drag coef. 
' , '', cd_atm_oce_arr(:), 1, 'm', i_real4, partit, mesh) + call def_stream(nod2D , myDim_nod2D , 'ch', 'transfer coeff. sensible heat', '', ch_atm_oce_arr(:), 1, 'm', i_real4, partit, mesh) + call def_stream(nod2D , myDim_nod2D , 'ce', 'transfer coeff. evaporation ' , '', ce_atm_oce_arr(:), 1, 'm', i_real4, partit, mesh) +#if defined (__oasis) + call def_stream(nod2D, myDim_nod2D, 'subli', 'sublimation', 'm/s', sublimation(:), 1, 'm', i_real4, partit, mesh) +#endif end if @@ -520,16 +550,18 @@ subroutine ini_mean_io(mesh) ! !-------------------------------------------------------------------------------------------- ! -function mesh_dimname_from_dimsize(size, mesh) result(name) +function mesh_dimname_from_dimsize(size, partit, mesh) result(name) use mod_mesh - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP use diagnostics #if defined (__icepack) use icedrv_main, only: ncat ! number of ice thickness cathegories #endif implicit none integer :: size - type(t_mesh) mesh + type(t_mesh) , intent(in) :: mesh + type(t_partit), intent(in) :: partit character(50) :: name if (size==mesh%nod2D) then @@ -548,29 +580,34 @@ function mesh_dimname_from_dimsize(size, mesh) result(name) #endif else name='unknown' - if (mype==0) write(*,*) 'WARNING: unknown dimension in mean I/O with size of ', size + if (partit%mype==0) write(*,*) 'WARNING: unknown dimension in mean I/O with size of ', size end if end function ! !-------------------------------------------------------------------------------------------- ! 
-subroutine create_new_file(entry, mesh) +subroutine create_new_file(entry, ice, dynamics, partit, mesh) use g_clock - use g_PARSUP use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + USE MOD_ICE use fesom_version_info_module use g_config - use i_PARAM use o_PARAM implicit none character(2000) :: att_text - type(t_mesh) mesh + type(t_mesh) , intent(in) :: mesh + type(t_partit), intent(in) :: partit + type(t_dyn) , intent(in) :: dynamics + type(t_ice) , intent(in) :: ice type(Meandata), intent(inout) :: entry character(len=*), parameter :: global_attributes_prefix = "FESOM_" ! Serial output implemented so far - if (mype/=entry%root_rank) return + if (partit%mype/=entry%root_rank) return ! create an ocean output file write(*,*) 'initializing I/O file for ', trim(entry%name) @@ -589,7 +626,7 @@ subroutine create_new_file(entry, mesh) elseif (entry%dimname(1)=='ncat') then call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'long_name', len_trim('sea-ice thickness class'),'sea-ice thickness class'), __LINE__) else - if (mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data' + if (partit%mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data' end if call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'units', len_trim('m'),'m'), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%dimvarID(1), 'positive', len_trim('down'),'down'), __LINE__) @@ -609,8 +646,9 @@ subroutine create_new_file(entry, mesh) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'axis', len_trim('T'), trim('T')), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%tID, 'stored_direction', len_trim('increasing'), trim('increasing')), __LINE__) - call assert_nf( nf_def_var(entry%ncid, trim(entry%name), entry%data_strategy%netcdf_type(), entry%ndim+1, & - (/entry%dimid(1:entry%ndim), entry%recID/), entry%varID), __LINE__) + call assert_nf( nf_def_var(entry%ncid, trim(entry%name), 
entry%data_strategy%netcdf_type(), entry%ndim+1, (/entry%dimid(entry%ndim:1:-1), entry%recID/), entry%varID), __LINE__) + + call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'description', len_trim(entry%description), entry%description), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'long_name', len_trim(entry%description), entry%description), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, entry%varID, 'units', len_trim(entry%units), entry%units), __LINE__) @@ -626,21 +664,21 @@ subroutine create_new_file(entry, mesh) call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'ClimateDataPath', len_trim(ClimateDataPath), trim(ClimateDataPath)), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'which_ALE', len_trim(which_ALE), trim(which_ALE)), __LINE__) call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'mix_scheme', len_trim(mix_scheme), trim(mix_scheme)), __LINE__) - call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_hor', len_trim(tra_adv_hor), trim(tra_adv_hor)), __LINE__) - call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_ver', len_trim(tra_adv_ver), trim(tra_adv_ver)), __LINE__) - call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_lim', len_trim(tra_adv_lim), trim(tra_adv_lim)), __LINE__) +! call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_hor', len_trim(tra_adv_hor), trim(tra_adv_hor)), __LINE__) +! call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_ver', len_trim(tra_adv_ver), trim(tra_adv_ver)), __LINE__) +! 
call assert_nf( nf_put_att_text(entry%ncid, NF_GLOBAL, global_attributes_prefix//'tra_adv_lim', len_trim(tra_adv_lim), trim(tra_adv_lim)), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell', NF_INT, 1, use_partial_cell), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'force_rotation', NF_INT, 1, force_rotation), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'include_fleapyear', NF_INT, 1, include_fleapyear), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_floatice', NF_INT, 1, use_floatice), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP', NF_INT, 1, whichEVP), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps', NF_INT, 1, evp_rheol_steps), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'visc_option', NF_INT, 1, visc_option), __LINE__) - call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'w_split', NF_INT, 1, w_split), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'whichEVP' , NF_INT, 1, ice%whichEVP), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'evp_rheol_steps' , NF_INT, 1, ice%evp_rheol_steps), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'opt_visc' , NF_INT, 1, dynamics%opt_visc), __LINE__) + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_wsplit' , NF_INT, 1, dynamics%use_wsplit), __LINE__) call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, global_attributes_prefix//'use_partial_cell', NF_INT, 1, use_partial_cell), __LINE__) - + call assert_nf( nf_put_att_int(entry%ncid, NF_GLOBAL, 
global_attributes_prefix//'autorotate_back_to_geo', NF_INT, 1, vec_autorotate), __LINE__) @@ -651,7 +689,7 @@ subroutine create_new_file(entry, mesh) elseif (entry%dimname(1)=='nz1') then call assert_nf( nf_put_var_double(entry%ncid, entry%dimvarID(1), abs(mesh%Z)), __LINE__) else - if (mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data' + if (partit%mype==0) write(*,*) 'WARNING: unknown first dimension in 2d mean I/O data' end if call assert_nf( nf_close(entry%ncid), __LINE__) @@ -660,7 +698,6 @@ subroutine create_new_file(entry, mesh) !-------------------------------------------------------------------------------------------- ! subroutine assoc_ids(entry) - use g_PARSUP implicit none type(Meandata), intent(inout) :: entry @@ -684,7 +721,6 @@ subroutine assoc_ids(entry) ! subroutine write_mean(entry, entry_index) use mod_mesh - use g_PARSUP use io_gather_module implicit none type(Meandata), intent(inout) :: entry @@ -692,10 +728,11 @@ subroutine write_mean(entry, entry_index) integer tag integer :: i, size1, size2, size_gen, size_lev, order integer :: c, lev + integer mpierr ! Serial output implemented so far - if (mype==entry%root_rank) then + if (entry%p_partit%mype==entry%root_rank) then write(*,*) 'writing mean record for ', trim(entry%name), '; rec. count = ', entry%rec_count call assert_nf( nf_put_vara_double(entry%ncid, entry%Tid, entry%rec_count, 1, entry%ctime_copy, 1), __LINE__) end if @@ -705,40 +742,48 @@ subroutine write_mean(entry, entry_index) tag = 2 ! we can use a fixed tag here as we have an individual communicator for each output field !___________writing 8 byte real_________________________________________ if (entry%accuracy == i_real8) then - if(mype==entry%root_rank) then + if(entry%p_partit%mype==entry%root_rank) then if(.not. allocated(entry%aux_r8)) allocate(entry%aux_r8(size2)) end if do lev=1, size1 +#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS + ! 
aleph cray-mpich workaround + call MPI_Barrier(entry%comm, mpierr) +#endif if(.not. entry%is_elem_based) then - call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm) + call gather_nod2D (entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%p_partit) else - call gather_elem2D(entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm) + call gather_elem2D(entry%local_values_r8_copy(lev,1:size(entry%local_values_r8_copy,dim=2)), entry%aux_r8, entry%root_rank, tag, entry%comm, entry%p_partit) end if - if (mype==entry%root_rank) then + if (entry%p_partit%mype==entry%root_rank) then if (entry%ndim==1) then call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r8, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r8, 1), __LINE__) + call assert_nf( nf_put_vara_double(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r8, 1), __LINE__) end if end if end do !___________writing 4 byte real _________________________________________ else if (entry%accuracy == i_real4) then - if(mype==entry%root_rank) then + if(entry%p_partit%mype==entry%root_rank) then if(.not. allocated(entry%aux_r4)) allocate(entry%aux_r4(size2)) end if do lev=1, size1 +#ifdef ENABLE_ALEPH_CRAYMPICH_WORKAROUNDS + ! aleph cray-mpich workaround + call MPI_Barrier(entry%comm, mpierr) +#endif if(.not. 
entry%is_elem_based) then - call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm) + call gather_real4_nod2D (entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%p_partit) else - call gather_real4_elem2D(entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm) + call gather_real4_elem2D(entry%local_values_r4_copy(lev,1:size(entry%local_values_r4_copy,dim=2)), entry%aux_r4, entry%root_rank, tag, entry%comm, entry%p_partit) end if - if (mype==entry%root_rank) then + if (entry%p_partit%mype==entry%root_rank) then if (entry%ndim==1) then call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, entry%rec_count/), (/size2, 1/), entry%aux_r4, 1), __LINE__) elseif (entry%ndim==2) then - call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/lev, 1, entry%rec_count/), (/1, size2, 1/), entry%aux_r4, 1), __LINE__) + call assert_nf( nf_put_vara_real(entry%ncid, entry%varID, (/1, lev, entry%rec_count/), (/size2, 1, 1/), entry%aux_r4, 1), __LINE__) end if end if end do @@ -748,63 +793,93 @@ subroutine write_mean(entry, entry_index) subroutine update_means - use g_PARSUP implicit none type(Meandata), pointer :: entry integer :: n + integer :: I, J - do n=1, io_NSTREAMS + DO n=1, io_NSTREAMS entry=>io_stream(n) !_____________ compute in 8 byte accuracy _________________________ - if (entry%accuracy == i_real8) then - if (entry%flip) then - entry%local_values_r8 = entry%local_values_r8 + transpose(entry%ptr3(1:size(entry%local_values_r8,dim=2),1:size(entry%local_values_r8,dim=1))) - else - entry%local_values_r8 = entry%local_values_r8 + entry%ptr3(1:size(entry%local_values_r8,dim=1),1:size(entry%local_values_r8,dim=2)) - end if + IF (entry%accuracy == i_real8) then + IF (entry%flip) then +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) + DO 
J=1, size(entry%local_values_r8,dim=2) + DO I=1, size(entry%local_values_r8,dim=1) + entry%local_values_r8(I,J)=entry%local_values_r8(I,J)+entry%ptr3(J,I) + END DO + END DO +!$OMP END PARALLEL DO + ELSE +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) + DO J=1, size(entry%local_values_r8,dim=2) + DO I=1, size(entry%local_values_r8,dim=1) + entry%local_values_r8(I,J)=entry%local_values_r8(I,J)+entry%ptr3(I,J) + END DO + END DO +!$OMP END PARALLEL DO + END IF !_____________ compute in 4 byte accuracy _________________________ - elseif (entry%accuracy == i_real4) then - if (entry%flip) then - entry%local_values_r4 = entry%local_values_r4 + transpose(real(entry%ptr3(1:size(entry%local_values_r4,dim=2),1:size(entry%local_values_r4,dim=1)),real32)) - else - entry%local_values_r4 = entry%local_values_r4 + real(entry%ptr3(1:size(entry%local_values_r4,dim=1),1:size(entry%local_values_r4,dim=2)),real32) - end if - endif - + ELSE IF (entry%accuracy == i_real4) then + IF (entry%flip) then +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) + DO J=1, size(entry%local_values_r4,dim=2) + DO I=1, size(entry%local_values_r4,dim=1) + entry%local_values_r4(I,J)=entry%local_values_r4(I,J)+real(entry%ptr3(J,I), real32) + END DO + END DO +!$OMP END PARALLEL DO + ELSE +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J) + DO J=1, size(entry%local_values_r4,dim=2) + DO I=1, size(entry%local_values_r4,dim=1) + entry%local_values_r4(I,J)=entry%local_values_r4(I,J)+real(entry%ptr3(I,J), real32) + END DO + END DO +!$OMP END PARALLEL DO + END IF + END IF entry%addcounter=entry%addcounter+1 - end do + END DO end subroutine ! !-------------------------------------------------------------------------------------------- ! 
-subroutine output(istep, mesh) +subroutine output(istep, ice, dynamics, tracers, partit, mesh) use g_clock use mod_mesh - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + use MOD_ICE + use mod_tracer use io_gather_module #if defined (__icepack) use icedrv_main, only: init_io_icepack #endif - implicit none - integer :: istep logical, save :: lfirst=.true. integer :: n, k + integer :: i, j !for OMP loops logical :: do_output type(Meandata), pointer :: entry type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(inout), target :: ice + character(:), allocatable :: filepath real(real64) :: rtime !timestamp of the record ctime=timeold+(dayold-1.)*86400 if (lfirst) then - call ini_mean_io(mesh) - call init_io_gather() + call ini_mean_io(ice, dynamics, tracers, partit, mesh) + call init_io_gather(partit) #if defined (__icepack) - call init_io_icepack(mesh) + call init_io_icepack(mesh) !icapack has its copy of p_partit => partit #endif - call init_io_gather() end if call update_means @@ -832,23 +907,23 @@ subroutine output(istep, mesh) else write(*,*) 'You did not specify a supported outputflag.' write(*,*) 'The program will stop to give you opportunity to do it.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) stop endif if (do_output) then - + if (vec_autorotate) call io_r2g(n, partit, mesh) ! automatically detect if a vector field and rotate if makes sense! if(entry%thread_running) call entry%thread%join() entry%thread_running = .false. filepath = trim(ResultPath)//trim(entry%name)//'.'//trim(runid)//'.'//cyearnew//'.nc' - if(mype == entry%root_rank) then + if(partit%mype == entry%root_rank) then if(filepath /= trim(entry%filename)) then if("" /= trim(entry%filename)) call assert_nf(nf_close(entry%ncid), __LINE__) entry%filename = filepath ! 
use any existing file with this name or create a new one if( nf_open(entry%filename, nf_write, entry%ncid) /= nf_noerr ) then - call create_new_file(entry, mesh) + call create_new_file(entry, ice, dynamics, partit, mesh) call assert_nf( nf_open(entry%filename, nf_write, entry%ncid), __LINE__) end if call assoc_ids(entry) @@ -872,11 +947,23 @@ subroutine output(istep, mesh) end if if (entry%accuracy == i_real8) then - entry%local_values_r8_copy = entry%local_values_r8 /real(entry%addcounter,real64) ! compute_means - entry%local_values_r8 = 0._real64 ! clean_meanarrays +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) + DO J=1, size(entry%local_values_r8,dim=2) + DO I=1, size(entry%local_values_r8,dim=1) + entry%local_values_r8_copy(I,J) = entry%local_values_r8(I,J) /real(entry%addcounter,real64) ! compute_means + entry%local_values_r8(I,J) = 0._real64 ! clean_meanarrays + END DO + END DO +!$OMP END PARALLEL DO else if (entry%accuracy == i_real4) then - entry%local_values_r4_copy = entry%local_values_r4 /real(entry%addcounter,real32) ! compute_means - entry%local_values_r4 = 0._real32 ! clean_meanarrays +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) + DO J=1, size(entry%local_values_r4,dim=2) + DO I=1, size(entry%local_values_r4,dim=1) + entry%local_values_r4_copy(I,J) = entry%local_values_r4(I,J) /real(entry%addcounter,real32) ! compute_means + entry%local_values_r4(I,J) = 0._real32 ! clean_meanarrays + END DO + END DO +!$OMP END PARALLEL DO end if entry%addcounter = 0 ! clean_meanarrays entry%ctime_copy = ctime @@ -889,18 +976,19 @@ subroutine output(istep, mesh) subroutine do_output_callback(entry_index) -use g_PARSUP use mod_mesh +USE MOD_PARTIT +USE MOD_PARSUP integer, intent(in) :: entry_index ! EO args type(Meandata), pointer :: entry entry=>io_stream(entry_index) - mype=entry%mype_workaround ! 
for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) + entry%p_partit%mype=entry%mype_workaround ! for the thread callback, copy back the value of our mype as a workaround for errors with the cray envinronment (at least with ftn 2.5.9 and cray-mpich 7.5.3) call write_mean(entry, entry_index) - if(mype == entry%root_rank) call assert_nf( nf_sync(entry%ncid), __LINE__ ) ! flush the file to disk after each write + if(entry%p_partit%mype == entry%root_rank) call assert_nf( nf_sync(entry%ncid), __LINE__ ) ! flush the file to disk after each write end subroutine @@ -917,13 +1005,15 @@ subroutine finalize_output() ! !-------------------------------------------------------------------------------------------- ! -subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, mesh, flip_array) +subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, partit, mesh, flip_array) use mod_mesh - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP implicit none + type(t_partit), intent(inout), target :: partit integer, intent(in) :: glsize(2), lcsize(2) character(len=*), intent(in) :: name, description, units - real(kind=WP), target, intent(inout) :: data(:,:) + real(kind=WP), target, intent(in) :: data(:,:) integer, intent(in) :: freq character, intent(in) :: freq_unit integer, intent(in) :: accuracy @@ -932,18 +1022,20 @@ subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, fr type(t_mesh), intent(in), target :: mesh logical, optional, intent(in) :: flip_array integer i - + +#if !defined(__PGI) do i = 1, rank(data) if ((ubound(data, dim = i)<=0)) then - if (mype==0) then + if (partit%mype==0) then write(*,*) 'WARNING: adding I/O stream for ', trim(name), ' failed (contains 0 dimension)' write(*,*) 'upper bound is: ', ubound(data, dim = i) end if return end if end do +#endif - if 
(mype==0) then + if (partit%mype==0) then write(*,*) 'adding I/O stream 3D for ', trim(name) end if @@ -973,48 +1065,51 @@ subroutine def_stream3D(glsize, lcsize, name, description, units, data, freq, fr entry%local_values_r4 = 0._real32 end if - entry%dimname(1)=mesh_dimname_from_dimsize(glsize(1), mesh) !2D! mesh_dimname_from_dimsize(glsize, mesh) - entry%dimname(2)=mesh_dimname_from_dimsize(glsize(2), mesh) !2D! entry%dimname(2)='unknown' - + entry%dimname(1)=mesh_dimname_from_dimsize(glsize(1), partit, mesh) !2D! mesh_dimname_from_dimsize(glsize, mesh) + entry%dimname(2)=mesh_dimname_from_dimsize(glsize(2), partit, mesh) !2D! entry%dimname(2)='unknown' ! non dimension specific - call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, mesh) + call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh) end subroutine ! !-------------------------------------------------------------------------------------------- ! 
-subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, mesh) +subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, freq_unit, accuracy, partit, mesh) use mod_mesh - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP implicit none integer, intent(in) :: glsize, lcsize character(len=*), intent(in) :: name, description, units - real(kind=WP), target, intent(inout) :: data(:) + real(kind=WP), target, intent(in) :: data(:) integer, intent(in) :: freq character, intent(in) :: freq_unit integer, intent(in) :: accuracy type(Meandata), allocatable :: tmparr(:) type(Meandata), pointer :: entry - type(t_mesh), intent(in), target :: mesh + type(t_mesh), intent(in) :: mesh + type(t_partit), intent(inout) :: partit integer i - + +#if !defined(__PGI) do i = 1, rank(data) if ((ubound(data, dim = i)<=0)) then - if (mype==0) then + if (partit%mype==0) then write(*,*) 'WARNING: adding I/O stream for ', trim(name), ' failed (contains 0 dimension)' write(*,*) 'upper bound is: ', ubound(data, dim = i) end if return end if end do +#endif - if (mype==0) then + if (partit%mype==0) then write(*,*) 'adding I/O stream 2D for ', trim(name) end if call associate_new_stream(name, entry) ! 2d specific - entry%ptr3(1:1,1:size(data)) => data + entry%ptr3(1:1,1:size(data)) => data(:) if (accuracy == i_real8) then allocate(entry%local_values_r8(1, lcsize)) @@ -1027,11 +1122,11 @@ subroutine def_stream2D(glsize, lcsize, name, description, units, data, freq, fr entry%ndim=1 entry%glsize=(/1, glsize/) - entry%dimname(1)=mesh_dimname_from_dimsize(glsize, mesh) + entry%dimname(1)=mesh_dimname_from_dimsize(glsize, partit, mesh) entry%dimname(2)='unknown' ! 
non dimension specific - call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, mesh) + call def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh) end subroutine @@ -1046,8 +1141,8 @@ subroutine associate_new_stream(name, entry) do i=1, io_NSTREAMS if(trim(io_stream(i)%name) .eq. name) then print *,"variable '"//name//"' already exists, & - check if you define it multiple times, for example in namelist.io, & - namelist.icepack, io_meandata.F90 or other place that add I/O stream." + &check if you define it multiple times, for example in namelist.io, & + &namelist.icepack, io_meandata.F90 or other place that add I/O stream." call assert(.false., __LINE__) end if end do @@ -1059,9 +1154,10 @@ subroutine associate_new_stream(name, entry) end subroutine - subroutine def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, mesh) + subroutine def_stream_after_dimension_specific(entry, name, description, units, freq, freq_unit, accuracy, partit, mesh) use mod_mesh - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP use io_netcdf_workaround_module type(Meandata), intent(inout) :: entry character(len=*), intent(in) :: name, description, units @@ -1069,6 +1165,7 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, character, intent(in) :: freq_unit integer, intent(in) :: accuracy type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit ! 
EO args logical async_netcdf_allowed integer provided_mpi_thread_support_level @@ -1084,8 +1181,8 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, elseif (accuracy == i_real4) then allocate(data_strategy_nf_float_type :: entry%data_strategy) else - if (mype==0) write(*,*) 'not supported output accuracy:',accuracy,'for',trim(name) - call par_ex + if (partit%mype==0) write(*,*) 'not supported output accuracy:',accuracy,'for',trim(name) + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop endif ! accuracy @@ -1104,7 +1201,7 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, else if(entry%glsize(1)==mesh%elem2D .or. entry%glsize(2)==mesh%elem2D) then entry%is_elem_based = .true. else - if(mype == 0) print *,"can not determine if ",trim(name)," is node or elem based" + if(partit%mype == 0) print *,"can not determine if ",trim(name)," is node or elem based" stop end if @@ -1116,9 +1213,9 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, ! set up async output - entry%root_rank = next_io_rank(MPI_COMM_FESOM, async_netcdf_allowed) + entry%root_rank = next_io_rank(partit%MPI_COMM_FESOM, async_netcdf_allowed, partit) - call MPI_Comm_dup(MPI_COMM_FESOM, entry%comm, err) + call MPI_Comm_dup(partit%MPI_COMM_FESOM, entry%comm, err) call entry%thread%initialize(do_output_callback, entry_index) if(.not. async_netcdf_allowed) call entry%thread%disable_async() @@ -1129,7 +1226,8 @@ subroutine def_stream_after_dimension_specific(entry, name, description, units, call MPI_Query_thread(provided_mpi_thread_support_level, err) if(provided_mpi_thread_support_level < MPI_THREAD_MULTIPLE) call entry%thread%disable_async() - entry%mype_workaround = mype ! make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads + entry%mype_workaround = partit%mype ! 
make a copy of the mype variable as there is an error with the cray compiler or environment which voids the global mype for our threads + entry%p_partit=>partit end subroutine @@ -1155,5 +1253,79 @@ subroutine assert(val, line) end if end subroutine -end module + subroutine io_r2g(n, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE g_rotate_grid + implicit none + integer, intent(in) :: n + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + integer :: I, J + type(Meandata), pointer :: entry_x, entry_y + real(kind=WP) :: temp_x, temp_y + real(kind=WP) :: xmean, ymean + logical :: do_rotation + + if (n==io_NSTREAMS) RETURN + entry_x=>io_stream(n) + entry_y=>io_stream(n+1) + IF (.NOT. (entry_x%freq_unit==entry_y%freq_unit) .and. ((entry_x%freq==entry_y%freq))) RETURN + IF (entry_x%accuracy /= entry_y%accuracy) RETURN + do_rotation=.FALSE. +! we need to improve the logistic here in order to use this routinely. a new argument in def_stream +! will be needed. + IF ((trim(entry_x%name)=='u' ) .AND. ((trim(entry_y%name)=='v' ))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='uice' ) .AND. ((trim(entry_y%name)=='vice' ))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='unod' ) .AND. ((trim(entry_y%name)=='vnod' ))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='tau_x' ) .AND. ((trim(entry_y%name)=='tau_y '))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='atmice_x') .AND. ((trim(entry_y%name)=='atmice_y'))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='atmoce_x') .AND. ((trim(entry_y%name)=='atmoce_y'))) do_rotation=.TRUE. + IF ((trim(entry_x%name)=='iceoce_x') .AND. ((trim(entry_y%name)=='iceoce_y'))) do_rotation=.TRUE. + + IF (.NOT. (do_rotation)) RETURN + + IF (partit%mype==0) THEN + write(*,*) trim(entry_x%name)//' and '//trim(entry_y%name)//' will be rotated before output!' + END IF + + IF ((entry_x%accuracy == i_real8) .AND. 
(entry_y%accuracy == i_real8)) THEN +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J, xmean, ymean) + DO J=1, size(entry_x%local_values_r8,dim=2) + if (entry_x%is_elem_based) then + xmean=sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP + ymean=sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP + else + xmean=mesh%coord_nod2D(1, J) + ymean=mesh%coord_nod2D(2, J) + end if + DO I=1, size(entry_x%local_values_r8,dim=1) + call vector_r2g(entry_x%local_values_r8(I,J), entry_y%local_values_r8(I,J), xmean, ymean, 0) + END DO + END DO +!$OMP END PARALLEL DO + END IF + + IF ((entry_x%accuracy == i_real4) .AND. (entry_y%accuracy == i_real4)) THEN +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I, J, temp_x, temp_y, xmean, ymean) + DO J=1, size(entry_x%local_values_r4,dim=2) + if (entry_x%is_elem_based) then + xmean=sum(mesh%coord_nod2D(1, mesh%elem2D_nodes(:, J)))/3._WP + ymean=sum(mesh%coord_nod2D(2, mesh%elem2D_nodes(:, J)))/3._WP + else + xmean=mesh%coord_nod2D(1, J) + ymean=mesh%coord_nod2D(2, J) + end if + DO I=1, size(entry_x%local_values_r4,dim=1) + temp_x=real(entry_x%local_values_r4(I,J), real64) + temp_y=real(entry_y%local_values_r4(I,J), real64) + call vector_r2g(temp_x, temp_y, xmean, ymean, 0) + entry_x%local_values_r4(I,J)=real(temp_x, real32) + entry_y%local_values_r4(I,J)=real(temp_y, real32) + END DO + END DO +!$OMP END PARALLEL DO + END IF + end subroutine +end module diff --git a/src/io_mesh_info.F90 b/src/io_mesh_info.F90 index ea58cb4b3..4b80bf9d0 100644 --- a/src/io_mesh_info.F90 +++ b/src/io_mesh_info.F90 @@ -1,9 +1,10 @@ module io_mesh_info -use g_PARSUP -use MOD_MESH +USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP use g_config use g_comm_auto -use o_ARRAYS +use o_PARAM implicit none #include "netcdf.inc" @@ -33,9 +34,10 @@ module io_mesh_info !------------------------------------------------------------------------- ! this routine stores most of metadata used in FESOM. Shall be called at the cold start once during the simulation. ! 
info: fesom.mesh.diag.nc is 77MB for the CORE II mesh with 47 vertical levels -subroutine write_mesh_info(mesh) +subroutine write_mesh_info(partit, mesh) implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit integer :: status, ncid, j integer :: nod_n_id, elem_n_id, edge_n_id, nod_part_id, elem_part_id integer :: nl_id, nl1_id @@ -50,6 +52,7 @@ subroutine write_mesh_info(mesh) integer :: zbar_e_bot_id,zbar_n_bot_id integer :: elem_id integer :: nod_id + integer :: lon_id, lat_id character(100) :: longname character(2000) :: filename real(kind=WP), allocatable :: rbuffer(:), lrbuffer(:) @@ -58,65 +61,71 @@ subroutine write_mesh_info(mesh) integer :: vtype integer, pointer :: pid -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" call MPI_AllREDUCE(maxval(nod_in_elem2D_num), N_max, 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) filename=trim(ResultPath)//runid//'.mesh.diag.nc' - call my_create(filename, IOR(NF_CLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), ncid) + call my_create(filename, IOR(NF_CLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), ncid, partit) !Define the dimensions - call my_def_dim(ncid, 'nod2', nod2D, nod_n_id) - call my_def_dim(ncid, 'edg_n', edge2d, edge_n_id) - call my_def_dim(ncid, 'elem', elem2d, elem_n_id) - call my_def_dim(ncid, 'nz', nl, nl_id) - call my_def_dim(ncid, 'nz1', nl-1, nl1_id) - call my_def_dim(ncid, 'n2', 2, id_2) - call my_def_dim(ncid, 'n3', 3, id_3) - call my_def_dim(ncid, 'n4', 4, id_4) - call my_def_dim(ncid, 'N', N_max, id_N) + call my_def_dim(ncid, 'nod2', nod2D, nod_n_id, partit) + call my_def_dim(ncid, 'edg_n', edge2d, edge_n_id, partit) + call my_def_dim(ncid, 'elem', elem2d, elem_n_id, partit) + call my_def_dim(ncid, 'nz', nl, nl_id, partit) + call my_def_dim(ncid, 'nz1', nl-1, nl1_id, partit) + call my_def_dim(ncid, 'n2', 
2, id_2, partit) + call my_def_dim(ncid, 'n3', 3, id_3, partit) + call my_def_dim(ncid, 'n4', 4, id_4, partit) + call my_def_dim(ncid, 'N', N_max, id_N, partit) !Define the variables ! 1D - call my_def_var(ncid, 'nz', NF_DOUBLE, 1, (/nl_id /), zbar_id, 'depth of levels' ) - call my_def_var(ncid, 'nz1', NF_DOUBLE, 1, (/nl1_id/), z_id, 'depth of layers' ) - call my_def_var(ncid, 'elem_area', NF_DOUBLE, 1, (/elem_n_id/), elem_area_id, 'element areas' ) - call my_def_var(ncid, 'nlevels_nod2D', NF_INT, 1, (/nod_n_id/), nlevels_nod2D_id, 'number of levels below nodes' ) - call my_def_var(ncid, 'nlevels', NF_INT, 1, (/elem_n_id/), nlevels_id, 'number of levels below elements' ) - call my_def_var(ncid, 'nod_in_elem2D_num', NF_INT, 1, (/nod_n_id/), nod_in_elem2D_num_id, 'number of elements containing the node') - call my_def_var(ncid, 'nod_part', NF_INT, 1, (/nod_n_id/), nod_part_id, 'nodal partitioning at the cold start' ) - call my_def_var(ncid, 'elem_part', NF_INT, 1, (/elem_n_id/), elem_part_id, 'element partitioning at the cold start') - call my_def_var(ncid, 'zbar_e_bottom', NF_DOUBLE, 1, (/elem_n_id/), zbar_e_bot_id, 'element bottom depth') - call my_def_var(ncid, 'zbar_n_bottom', NF_DOUBLE, 1, (/nod_n_id/) , zbar_n_bot_id, 'nodal bottom depth') + call my_def_var(ncid, 'nz', NF_DOUBLE, 1, (/nl_id /), zbar_id, 'depth of levels', partit) + call my_def_var(ncid, 'nz1', NF_DOUBLE, 1, (/nl1_id/), z_id, 'depth of layers', partit) + call my_def_var(ncid, 'elem_area', NF_DOUBLE, 1, (/elem_n_id/), elem_area_id, 'element areas', partit) + call my_def_var(ncid, 'nlevels_nod2D', NF_INT, 1, (/nod_n_id/), nlevels_nod2D_id, 'number of levels below nodes', partit) + call my_def_var(ncid, 'nlevels', NF_INT, 1, (/elem_n_id/), nlevels_id, 'number of levels below elements', partit) + call my_def_var(ncid, 'nod_in_elem2D_num', NF_INT, 1, (/nod_n_id/), nod_in_elem2D_num_id, 'number of elements containing the node', partit) + call my_def_var(ncid, 'nod_part', NF_INT, 1, (/nod_n_id/), 
nod_part_id, 'nodal partitioning at the cold start', partit) + call my_def_var(ncid, 'elem_part', NF_INT, 1, (/elem_n_id/), elem_part_id, 'element partitioning at the cold start', partit) + call my_def_var(ncid, 'zbar_e_bottom', NF_DOUBLE, 1, (/elem_n_id/), zbar_e_bot_id, 'element bottom depth', partit) + call my_def_var(ncid, 'zbar_n_bottom', NF_DOUBLE, 1, (/nod_n_id/), zbar_n_bot_id, 'nodal bottom depth', partit) + call my_def_var(ncid, 'lon', NF_DOUBLE, 1, (/nod_n_id/), lon_id, 'longitude', partit) + call my_def_var(ncid, 'lat', NF_DOUBLE, 1, (/nod_n_id/), lat_id, 'latitude', partit) + ! 2D - call my_def_var(ncid, 'nod_area', NF_DOUBLE, 2, (/nod_n_id, nl_id/), nod_area_id, 'nodal areas' ) - call my_def_var(ncid, 'elem', NF_INT, 2, (/elem_n_id, id_3/), elem_id, 'elements' ) - call my_def_var(ncid, 'nodes', NF_DOUBLE, 2, (/nod_n_id, id_2/), nod_id, 'nodal geo. coordinates' ) - call my_def_var(ncid, 'nod_in_elem2D', NF_INT, 2, (/nod_n_id, id_N/), nod_in_elem2D_id, 'elements containing the node') - call my_def_var(ncid, 'edges', NF_INT, 2, (/edge_n_id, id_2/), edges_id, 'edges' ) - call my_def_var(ncid, 'edge_tri', NF_INT, 2, (/edge_n_id, id_2/), edge_tri_id, 'edge triangles' ) - call my_def_var(ncid, 'edge_cross_dxdy', NF_DOUBLE, 2, (/edge_n_id, id_4/), edge_cross_dxdy_id, 'edge cross distancess' ) - call my_def_var(ncid, 'gradient_sca_x', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_x_id, 'x component of a gradient at nodes of an element') - call my_def_var(ncid, 'gradient_sca_y', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_y_id, 'y component of a gradient at nodes of an element') - call my_nf_enddef(ncid) + call my_def_var(ncid, 'nod_area', NF_DOUBLE, 2, (/nod_n_id, nl_id/), nod_area_id, 'nodal areas', partit) + call my_def_var(ncid, 'elements', NF_INT, 2, (/elem_n_id, id_3/), elem_id, 'elements', partit) + call my_def_var(ncid, 'nodes', NF_DOUBLE, 2, (/nod_n_id, id_2/), nod_id, 'nodal geo. 
coordinates', partit) + call my_def_var(ncid, 'nod_in_elem2D', NF_INT, 2, (/nod_n_id, id_N/), nod_in_elem2D_id, 'elements containing the node', partit) + call my_def_var(ncid, 'edges', NF_INT, 2, (/edge_n_id, id_2/), edges_id, 'edges', partit) + call my_def_var(ncid, 'edge_tri', NF_INT, 2, (/edge_n_id, id_2/), edge_tri_id, 'edge triangles', partit) + call my_def_var(ncid, 'edge_cross_dxdy', NF_DOUBLE, 2, (/edge_n_id, id_4/), edge_cross_dxdy_id, 'edge cross distancess', partit) + call my_def_var(ncid, 'gradient_sca_x', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_x_id, 'x component of a gradient at nodes of an element', partit) + call my_def_var(ncid, 'gradient_sca_y', NF_DOUBLE, 2, (/id_3, elem_n_id/), gradient_sca_y_id, 'y component of a gradient at nodes of an element', partit) + call my_nf_enddef(ncid, partit) ! vercical levels/layers - call my_put_vara(ncid, zbar_id, 1, nl, zbar) - call my_put_vara(ncid, z_id, 1, nl-1, Z) + call my_put_vara(ncid, zbar_id, 1, nl, zbar, partit) + call my_put_vara(ncid, z_id, 1, nl-1, Z, partit) ! nodal areas allocate(rbuffer(nod2D)) do k=1, nl - call gather_nod(area(k, :), rbuffer) - call my_put_vara(ncid, nod_area_id, (/1, k/), (/nod2D, 1/), rbuffer) + call gather_nod(area(k, :), rbuffer, partit) + call my_put_vara(ncid, nod_area_id, (/1, k/), (/nod2D, 1/), rbuffer, partit) end do deallocate(rbuffer) ! element areas allocate(rbuffer(elem2D)) - call gather_elem(elem_area(1:myDim_elem2D), rbuffer) - call my_put_vara(ncid, elem_area_id, 1, elem2D, rbuffer) + call gather_elem(elem_area(1:myDim_elem2D), rbuffer, partit) + call my_put_vara(ncid, elem_area_id, 1, elem2D, rbuffer, partit) deallocate(rbuffer) ! 
elements @@ -126,27 +135,27 @@ subroutine write_mesh_info(mesh) do k=1, myDim_elem2D lbuffer(k)=myList_nod2D(elem2d_nodes(i, k)) end do - call gather_elem(lbuffer, ibuffer) - call my_put_vara(ncid, elem_id, (/1, i/), (/elem2D, 1/), ibuffer) + call gather_elem(lbuffer, ibuffer, partit) + call my_put_vara(ncid, elem_id, (/1, i/), (/elem2D, 1/), ibuffer, partit) end do deallocate(lbuffer, ibuffer) ! number of levels below elements allocate(ibuffer(elem2D)) - call gather_elem(nlevels(1:myDim_elem2D), ibuffer) - call my_put_vara(ncid, nlevels_id, 1, elem2D, ibuffer) + call gather_elem(nlevels(1:myDim_elem2D), ibuffer, partit) + call my_put_vara(ncid, nlevels_id, 1, elem2D, ibuffer, partit) deallocate(ibuffer) ! number of levels below nodes allocate(ibuffer(nod2D)) - call gather_nod(nlevels_nod2D(1:myDim_nod2D), ibuffer) - call my_put_vara(ncid, nlevels_nod2D_id, 1, nod2D, ibuffer) + call gather_nod(nlevels_nod2D(1:myDim_nod2D), ibuffer, partit) + call my_put_vara(ncid, nlevels_nod2D_id, 1, nod2D, ibuffer, partit) deallocate(ibuffer) ! number of elements containing the node allocate(ibuffer(nod2D)) - call gather_nod(nod_in_elem2D_num(1:myDim_nod2D), ibuffer) - call my_put_vara(ncid, nod_in_elem2D_num_id, 1, nod2D, ibuffer) + call gather_nod(nod_in_elem2D_num(1:myDim_nod2D), ibuffer, partit) + call my_put_vara(ncid, nod_in_elem2D_num_id, 1, nod2D, ibuffer, partit) deallocate(ibuffer) ! 
elements containing the node @@ -159,8 +168,8 @@ subroutine write_mesh_info(mesh) lbuffer(k)=myList_elem2D(nod_in_elem2D(i, k)) end if end do - call gather_nod(lbuffer, ibuffer) - call my_put_vara(ncid, nod_in_elem2D_id, (/1, i/), (/nod2D, 1/), ibuffer) + call gather_nod(lbuffer, ibuffer, partit) + call my_put_vara(ncid, nod_in_elem2D_id, (/1, i/), (/nod2D, 1/), ibuffer, partit) END DO deallocate(lbuffer, ibuffer) @@ -168,24 +177,30 @@ subroutine write_mesh_info(mesh) allocate(ibuffer(nod2D)) allocate(lbuffer(myDim_nod2D)) lbuffer=mype - call gather_nod(lbuffer, ibuffer) - call my_put_vara(ncid, nod_part_id, 1, nod2D, ibuffer) + call gather_nod(lbuffer, ibuffer, partit) + call my_put_vara(ncid, nod_part_id, 1, nod2D, ibuffer, partit) deallocate(lbuffer, ibuffer) ! element partitioning allocate(ibuffer(elem2D)) allocate(lbuffer(myDim_elem2D)) lbuffer=mype - call gather_elem(lbuffer, ibuffer) - call my_put_vara(ncid, elem_part_id, 1, elem2D, ibuffer) + call gather_elem(lbuffer, ibuffer, partit) + call my_put_vara(ncid, elem_part_id, 1, elem2D, ibuffer, partit) deallocate(lbuffer, ibuffer) ! 
nodes (GEO coordinates) allocate(rbuffer(nod2D)) do i=1, 2 - call gather_nod(geo_coord_nod2D(i, 1:myDim_nod2D), rbuffer) - call my_put_vara(ncid, nod_id, (/1, i/), (/nod2D, 1/), rbuffer) + call gather_nod(geo_coord_nod2D(i, 1:myDim_nod2D), rbuffer, partit) + rbuffer = rbuffer/rad + call my_put_vara(ncid, nod_id, (/1, i/), (/nod2D, 1/), rbuffer, partit) + if (i == 1) then + call my_put_vara(ncid, lon_id, 1, nod2D, rbuffer, partit) + else + call my_put_vara(ncid, lat_id, 1, nod2D, rbuffer, partit) + endif end do deallocate(rbuffer) @@ -196,8 +211,8 @@ subroutine write_mesh_info(mesh) do k=1, myDim_edge2D lbuffer(k)=myList_nod2D(edges(i, k)) end do - call gather_edge(lbuffer, ibuffer) - call my_put_vara(ncid, edges_id, (/1, i/), (/edge2D, 1/), ibuffer) + call gather_edge(lbuffer, ibuffer, partit) + call my_put_vara(ncid, edges_id, (/1, i/), (/edge2D, 1/), ibuffer, partit) end do deallocate(lbuffer, ibuffer) @@ -212,8 +227,8 @@ subroutine write_mesh_info(mesh) lbuffer(k) = 0 endif end do - call gather_edge(lbuffer, ibuffer) - call my_put_vara(ncid, edge_tri_id, (/1, i/), (/edge2D, 1/), ibuffer) + call gather_edge(lbuffer, ibuffer, partit) + call my_put_vara(ncid, edge_tri_id, (/1, i/), (/edge2D, 1/), ibuffer, partit) end do deallocate(lbuffer, ibuffer) @@ -222,8 +237,8 @@ subroutine write_mesh_info(mesh) allocate(lrbuffer(myDim_edge2D)) do i=1, 4 lrbuffer=edge_cross_dxdy(i, 1:myDim_edge2D) - call gather_edge(lrbuffer, rbuffer) - call my_put_vara(ncid, edge_cross_dxdy_id, (/1, i/), (/edge2D, 1/), rbuffer) + call gather_edge(lrbuffer, rbuffer, partit) + call my_put_vara(ncid, edge_cross_dxdy_id, (/1, i/), (/edge2D, 1/), rbuffer, partit) end do deallocate(rbuffer, lrbuffer) @@ -231,184 +246,187 @@ subroutine write_mesh_info(mesh) ! X component of gadient at elements allocate(rbuffer(elem2D)) do i=1, 3 - call gather_elem(gradient_sca(i, 1:myDim_elem2D), rbuffer) - call my_put_vara(ncid, gradient_sca_x_id, (/4-i, 1/), (/1, elem2D/), rbuffer) ! 
(4-i), NETCDF will permute otherwise + call gather_elem(gradient_sca(i, 1:myDim_elem2D), rbuffer, partit) + call my_put_vara(ncid, gradient_sca_x_id, (/4-i, 1/), (/1, elem2D/), rbuffer, partit) ! (4-i), NETCDF will permute otherwise end do deallocate(rbuffer) ! Y component of gadient at elements allocate(rbuffer(elem2D)) do i=1, 3 - call gather_elem(gradient_sca(i+3, 1:myDim_elem2D), rbuffer) - call my_put_vara(ncid, gradient_sca_y_id, (/4-i, 1/), (/1, elem2D/), rbuffer) ! (4-i), NETCDF will permute otherwise + call gather_elem(gradient_sca(i+3, 1:myDim_elem2D), rbuffer, partit) + call my_put_vara(ncid, gradient_sca_y_id, (/4-i, 1/), (/1, elem2D/), rbuffer, partit)! (4-i), NETCDF will permute otherwise end do deallocate(rbuffer) ! nodal bottom depth (take into account partial cells if used) allocate(rbuffer(nod2D)) - call gather_nod(zbar_n_bot(1:myDim_nod2D), rbuffer) - call my_put_vara(ncid, zbar_n_bot_id, 1, nod2D, rbuffer) + call gather_nod(zbar_n_bot(1:myDim_nod2D), rbuffer, partit) + call my_put_vara(ncid, zbar_n_bot_id, 1, nod2D, rbuffer, partit) deallocate(rbuffer) ! element bottom depth (take into account partial cells if used) allocate(rbuffer(elem2D)) - call gather_elem(zbar_e_bot(1:myDim_elem2D), rbuffer) - call my_put_vara(ncid, zbar_e_bot_id, 1, elem2D, rbuffer) + call gather_elem(zbar_e_bot(1:myDim_elem2D), rbuffer, partit) + call my_put_vara(ncid, zbar_e_bot_id, 1, elem2D, rbuffer, partit) deallocate(rbuffer) - call my_close(ncid) + call my_close(ncid, partit) end subroutine write_mesh_info ! !============================================================================ ! 
-subroutine my_def_dim(ncid, short_name, value, id) +subroutine my_def_dim(ncid, short_name, value, id, partit) IMPLICIT NONE -integer, intent(in) :: ncid, value -character(*), intent(in) :: short_name -integer, intent(inout):: id -integer :: ierror, status +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid, value +character(*), intent(in) :: short_name +integer, intent(inout) :: id +integer :: ierror, status -if (mype==0) then +if (partit%mype==0) then status = nf_def_dim(ncid, trim(short_name), value, id) end if -call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) -if (status .ne. nf_noerr) call handle_err(status) +call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) +if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_def_dim ! !============================================================================ ! -subroutine my_def_var(ncid, short_name, vtype, dsize, dids, id, att_text) +subroutine my_def_var(ncid, short_name, vtype, dsize, dids, id, att_text, partit) IMPLICIT NONE -integer, intent(in) :: ncid, dsize, dids(dsize), vtype -character(*), intent(in) :: short_name, att_text -integer, intent(inout):: id -integer :: ierror, status +type(t_partit), intent(inout):: partit +integer, intent(in) :: ncid, dsize, dids(dsize), vtype +character(*), intent(in) :: short_name, att_text +integer, intent(inout):: id +integer :: ierror, status -if (mype==0) then +if (partit%mype==0) then status = nf_def_var(ncid, trim(short_name), vtype, dsize, dids, id) end if -call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) -if (status .ne. nf_noerr) call handle_err(status) +call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) +if (status .ne. 
nf_noerr) call handle_err(status, partit) -if (mype==0) then +if (partit%mype==0) then status = nf_put_att_text(ncid, id, 'long_name', len_trim(att_text), trim(att_text)); end if -call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) -if (status .ne. nf_noerr) call handle_err(status) +call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) +if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_def_var ! !============================================================================ ! -subroutine my_nf_enddef(ncid) +subroutine my_nf_enddef(ncid, partit) IMPLICIT NONE -integer, intent(in) :: ncid -integer :: ierror, status +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid +integer :: ierror, status -if (mype==0) then +if (partit%mype==0) then status = nf_enddef(ncid) end if -call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) -if (status .ne. nf_noerr) call handle_err(status) +call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) +if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_nf_enddef ! !============================================================================ ! -subroutine my_put_vara_double_1D(ncid, varid, start, N, var) +subroutine my_put_vara_double_1D(ncid, varid, start, N, var, partit) IMPLICIT NONE +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid, varid, start, N +real(kind=WP) :: var(:) +integer :: ierror, status -integer, intent(in) :: ncid, varid, start, N -real(kind=WP) :: var(:) -integer :: ierror, status - - - if (mype==0) status=nf_put_vara_double(ncid, varid, start, N, var) - call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (status .ne. nf_noerr) call handle_err(status) + if (partit%mype==0) status=nf_put_vara_double(ncid, varid, start, N, var) + call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + if (status .ne. 
nf_noerr) call handle_err(status, partit) end subroutine my_put_vara_double_1D ! !============================================================================ ! -subroutine my_put_vara_double_2D(ncid, varid, start, N, var) +subroutine my_put_vara_double_2D(ncid, varid, start, N, var, partit) IMPLICIT NONE +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid, varid, start(:), N(:) +real(kind=WP) :: var(:) +integer :: ierror, status -integer, intent(in) :: ncid, varid, start(:), N(:) -real(kind=WP) :: var(:) -integer :: ierror, status - - if (mype==0) status=nf_put_vara_double(ncid, varid, start, N, var) - call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (status .ne. nf_noerr) call handle_err(status) + if (partit%mype==0) status=nf_put_vara_double(ncid, varid, start, N, var) + call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_put_vara_double_2D ! !============================================================================ ! -subroutine my_put_vara_int_1D(ncid, varid, start, N, var) +subroutine my_put_vara_int_1D(ncid, varid, start, N, var, partit) IMPLICIT NONE +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid, varid, start, N +integer :: var(:) +integer :: ierror, status -integer, intent(in) :: ncid, varid, start, N -integer :: var(:) -integer :: ierror, status - - - if (mype==0) status=nf_put_vara_int(ncid, varid, start, N, var) - call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (status .ne. nf_noerr) call handle_err(status) + if (partit%mype==0) status=nf_put_vara_int(ncid, varid, start, N, var) + call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_put_vara_int_1D ! !============================================================================ ! 
-subroutine my_put_vara_int_2D(ncid, varid, start, N, var) +subroutine my_put_vara_int_2D(ncid, varid, start, N, var, partit) IMPLICIT NONE -integer, intent(in) :: ncid, varid, start(:), N(:) -integer :: var(:) -integer :: ierror, status +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid, varid, start(:), N(:) +integer :: var(:) +integer :: ierror, status - if (mype==0) status=nf_put_vara_int(ncid, varid, start, N, var) - call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (status .ne. nf_noerr) call handle_err(status) + if (partit%mype==0) status=nf_put_vara_int(ncid, varid, start, N, var) + call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_put_vara_int_2D ! !============================================================================ ! -subroutine my_create(filename, opt, ncid) +subroutine my_create(filename, opt, ncid, partit) IMPLICIT NONE -integer, intent(in) :: opt, ncid -character(*), intent(in) :: filename -integer :: ierror, status - if (mype==0) then ! create a file +type(t_partit), intent(inout):: partit +integer, intent(in) :: opt, ncid +character(*), intent(in) :: filename +integer :: ierror, status + if (partit%mype==0) then ! create a file ! create a file status = nf_create(filename, opt, ncid) - if (status.ne.nf_noerr) call handle_err(status) + if (status.ne.nf_noerr) call handle_err(status, partit) end if - call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (status .ne. nf_noerr) call handle_err(status) + call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) + if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_create ! !============================================================================ ! 
-subroutine my_close(ncid) +subroutine my_close(ncid, partit) IMPLICIT NONE -integer, intent(in) :: ncid -integer :: ierror, status +type(t_partit), intent(inout) :: partit +integer, intent(in) :: ncid +integer :: ierror, status -if (mype==0) status = nf_close(ncid) +if (partit%mype==0) status = nf_close(ncid) -call MPI_BCast(status, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) -if (status .ne. nf_noerr) call handle_err(status) +call MPI_BCast(status, 1, MPI_INTEGER, 0, partit%MPI_COMM_FESOM, ierror) +if (status .ne. nf_noerr) call handle_err(status, partit) end subroutine my_close - end module io_mesh_info diff --git a/src/io_netcdf_attribute_module.F90 b/src/io_netcdf_attribute_module.F90 new file mode 100644 index 000000000..a04409b13 --- /dev/null +++ b/src/io_netcdf_attribute_module.F90 @@ -0,0 +1,74 @@ +module io_netcdf_attribute_module + implicit none + public att_type, att_type_text, att_type_int + private + + + type, abstract :: att_type + character(:), allocatable :: name + contains + procedure(define_in_var), deferred :: define_in_var + end type + + + interface + subroutine define_in_var(this, fileid, varid) + import att_type + class(att_type), intent(inout) :: this + integer, intent(in) :: fileid + integer, intent(in) :: varid + end subroutine + end interface + + + type, extends(att_type) :: att_type_text + character(:), allocatable :: text + contains + procedure :: define_in_var => define_in_var_text + end type + + + type, extends(att_type) :: att_type_int + integer :: val + contains + procedure :: define_in_var => define_in_var_int + end type + + +contains + + + subroutine define_in_var_text(this, fileid, varid) + class(att_type_text), intent(inout) :: this + integer, intent(in) :: fileid + integer, intent(in) :: varid + ! 
EO parameters + include "netcdf.inc" + + call assert_nc( nf_put_att_text(fileid, varid, this%name, len(this%text), this%text) , __LINE__) + end subroutine + + + subroutine define_in_var_int(this, fileid, varid) + class(att_type_int), intent(inout) :: this + integer, intent(in) :: fileid + integer, intent(in) :: varid + ! EO parameters + include "netcdf.inc" + + call assert_nc( nf_put_att_int(fileid, varid, this%name, nf_int, 1, this%val) , __LINE__) + end subroutine + + + subroutine assert_nc(status, line) + integer, intent(in) :: status + integer, intent(in) :: line + ! EO parameters + include "netcdf.inc" + if(status /= nf_noerr) then + print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status)) + stop 1 + endif + end subroutine + +end module diff --git a/src/io_netcdf_file_module.F90 b/src/io_netcdf_file_module.F90 new file mode 100644 index 000000000..9eb6a245a --- /dev/null +++ b/src/io_netcdf_file_module.F90 @@ -0,0 +1,600 @@ +module io_netcdf_file_module + use io_netcdf_attribute_module + implicit none + public netcdf_file_type + private + + type dim_type + character(:), allocatable :: name + integer len + integer ncid + end type + + type att_type_wrapper ! work around Fortran not being able to have polymorphic types in the same array + class(att_type), allocatable :: it + end type + + type var_type ! todo: use variable type from io_netcdf_module here + character(:), allocatable :: name + integer, allocatable :: dim_indices(:) + integer datatype + type(att_type_wrapper) :: atts(15) ! 
use a fixed size array to store our netcdf variable attributes as nvfortran seems to lose allocation of derived types which contain allocatable types when copying the array + integer :: atts_count = 0 + integer ncid + end type + + type netcdf_file_type + private + type(dim_type), allocatable :: dims(:) + type(var_type), allocatable :: vars(:) + type(att_type_wrapper), allocatable :: gatts(:) + + character(:), allocatable :: filepath + integer ncid + contains + procedure, public :: initialize, add_dim, add_dim_unlimited, add_var_double, add_var_real, add_var_int, open_read, flush_file, close_file, open_write_create, open_write_append + procedure, public :: is_attached, read_var_shape + procedure, public :: ndims + generic, public :: read_var => read_var_r4, read_var_r8, read_var_integer + generic, public :: write_var => write_var_r4, write_var_r8, write_var_integer + generic, public :: read_var1 => read_var1_r4, read_var1_r8, read_var1_integer + generic, public :: add_var_att => add_var_att_text, add_var_att_int + generic, public :: add_global_att => add_global_att_text, add_global_att_int + procedure, private :: read_var_r4, read_var_r8, read_var_integer, attach_dims_vars_to_file, add_var_x, write_var_r4, write_var_r8, write_var_integer, add_var_att_text, add_var_att_int + procedure, private :: read_var1_r4, read_var1_r8, read_var1_integer + procedure, private :: add_global_att_text, add_global_att_int + end type + +contains + + + subroutine initialize(this) + class(netcdf_file_type), intent(inout) :: this + + this%filepath = "" + allocate(this%dims(0)) + allocate(this%vars(0)) + allocate(this%gatts(0)) + end subroutine + + + function add_dim_unlimited(this, name) result(dimindex) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer dimindex + ! 
EO parameters + include "netcdf.inc" + + dimindex = this%add_dim(name, nf_unlimited) + end function + + + function add_dim(this, name, len) result(dimindex) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: len + integer dimindex + ! EO parameters + type(dim_type), allocatable :: tmparr(:) + + ! assume the dims array is allocated + allocate( tmparr(size(this%dims)+1) ) + tmparr(1:size(this%dims)) = this%dims + deallocate(this%dims) + call move_alloc(tmparr, this%dims) + + dimindex = size(this%dims) + this%dims(dimindex) = dim_type(name=name, len=len, ncid=-1) + end function + + + ! return number of specified dimensions (which might be less dimensions than an attached file has) + function ndims(this) + class(netcdf_file_type), intent(inout) :: this + integer ndims + ! EO parameters + + ndims = size(this%dims) + end function + + + ! the sizes of the dims define the global shape of the var + function add_var_double(this, name, dim_indices) result(varindex) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: dim_indices(:) + integer varindex + ! EO parameters + include "netcdf.inc" + + varindex = this%add_var_x(name, dim_indices, nf_double) + end function + + + ! the sizes of the dims define the global shape of the var + function add_var_real(this, name, dim_indices) result(varindex) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: dim_indices(:) + integer varindex + ! EO parameters + include "netcdf.inc" + + varindex = this%add_var_x(name, dim_indices, nf_real) + end function + + + ! the sizes of the dims define the global shape of the var + function add_var_int(this, name, dim_indices) result(varindex) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: dim_indices(:) + integer varindex + ! 
EO parameters + include "netcdf.inc" + + varindex = this%add_var_x(name, dim_indices, nf_int) + end function + + + function add_var_x(this, name, dim_indices, netcdf_datatype) result(varindex) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: name + integer, intent(in) :: dim_indices(:) + integer netcdf_datatype + integer varindex + ! EO parameters + include "netcdf.inc" + type(var_type), allocatable :: tmparr(:) + + ! assume the vars array is allocated + allocate( tmparr(size(this%vars)+1) ) + tmparr(1:size(this%vars)) = this%vars(:) + deallocate(this%vars) + call move_alloc(tmparr, this%vars) + + varindex = size(this%vars) +! this%vars(varindex) = var_type(name=name, dim_indices=dim_indices, datatype=netcdf_datatype, atts=empty_atts, ncid=-1) +! NVIDIA 22.1 compiler didn't like the line above, hence we unfold it inelegantly: + this%vars(varindex)%name = name + this%vars(varindex)%dim_indices= dim_indices + this%vars(varindex)%datatype = netcdf_datatype + this%vars(varindex)%ncid = -1 + end function + + + subroutine add_global_att_text(this, att_name, att_text) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: att_name + character(len=*), intent(in) :: att_text + ! EO parameters + type(att_type_wrapper), allocatable :: tmparr(:) + + allocate( tmparr(size(this%gatts)+1) ) + tmparr(1:size(this%gatts)) = this%gatts + deallocate(this%gatts) + call move_alloc(tmparr, this%gatts) + + this%gatts( size(this%gatts) )%it = att_type_text(name=att_name, text=att_text) + end subroutine + + + subroutine add_global_att_int(this, att_name, att_val) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: att_name + integer, intent(in) :: att_val + ! 
EO parameters + type(att_type_wrapper), allocatable :: tmparr(:) + + allocate( tmparr(size(this%gatts)+1) ) + tmparr(1:size(this%gatts)) = this%gatts + deallocate(this%gatts) + call move_alloc(tmparr, this%gatts) + + this%gatts( size(this%gatts) )%it = att_type_int(name=att_name, val=att_val) + end subroutine + + + subroutine add_var_att_text(this, varindex, att_name, att_text) + class(netcdf_file_type), intent(inout) :: this + integer, intent(in) :: varindex + character(len=*), intent(in) :: att_name + character(len=*), intent(in) :: att_text + ! EO parameters + type(att_type_wrapper), allocatable :: tmparr(:) + type(att_type_text) att + + ! add this att_type instance to atts array + this%vars(varindex)%atts_count = this%vars(varindex)%atts_count +1 + call assert(size(this%vars(varindex)%atts) >= this%vars(varindex)%atts_count, __LINE__) + + att = att_type_text(name=att_name, text=att_text) + allocate( this%vars(varindex)%atts( this%vars(varindex)%atts_count )%it, source=att ) + end subroutine + + + subroutine add_var_att_int(this, varindex, att_name, att_val) + class(netcdf_file_type), intent(inout) :: this + integer, intent(in) :: varindex + character(len=*), intent(in) :: att_name + integer, intent(in) :: att_val + ! EO parameters + type(att_type_wrapper), allocatable :: tmparr(:) + type(att_type_int) att + + ! add this att_type instance to atts array + this%vars(varindex)%atts_count = this%vars(varindex)%atts_count +1 + call assert(size(this%vars(varindex)%atts) >= this%vars(varindex)%atts_count, __LINE__) + + att = att_type_int(name=att_name, val=att_val) + allocate( this%vars(varindex)%atts( this%vars(varindex)%atts_count )%it, source=att ) + end subroutine + + + function is_attached(this) result(x) + class(netcdf_file_type), intent(in) :: this + logical x + ! EO parameters + + x = (this%filepath .ne. "") + end function + + + subroutine open_read(this, filepath) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: filepath + ! 
EO parameters + include "netcdf.inc" + integer mode + + mode = nf_nowrite + this%filepath = filepath + + call assert_nc( nf_open(this%filepath, mode, this%ncid) , __LINE__) + + ! attach our dims and vars to their counterparts in the file + call this%attach_dims_vars_to_file() + end subroutine + + + ! return an array with the dimension sizes for all dimensions of the given variable + ! this currently only makes sense for variables with unlimited dimensions, + ! as all other dimensions must be known when adding the variable to the file specification, e.g before reading the file + subroutine read_var_shape(this, varindex, varshape) + class(netcdf_file_type), target, intent(in) :: this + integer, intent(in) :: varindex + integer, allocatable, intent(out) :: varshape(:) + ! EO parameters + include "netcdf.inc" + type(var_type), pointer :: var + integer var_ndims + integer i + + var => this%vars(varindex) + var_ndims = size(var%dim_indices) + + if(allocated(varshape)) deallocate(varshape) + allocate(varshape(var_ndims)) + + do i=1, var_ndims + if(this%dims( var%dim_indices(i) )%len == nf_unlimited) then + ! actually read from the file + call assert_nc( nf_inq_dimlen(this%ncid, this%dims( var%dim_indices(i) )%ncid, varshape(i)) , __LINE__) + else + ! use the dim size which has been set without the file and is thus known anyway to the user + varshape(i) = this%dims( var%dim_indices(i) )%len + end if + end do + end subroutine + + + ! values array is not required to have the same shape as the variable but must fit the product of all items of the sizes array + ! this way we can retrieve e.g. data from a 3D variable to a 2D array with one size set to 1 (e.g. to get a single timestep) + ! 
starts and sizes must have the same rank as the variable has dimensions + subroutine read_var_r8(this, varindex, starts, sizes, values) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + real(8), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed + ! EO parameters + include "netcdf.inc" + real(8), pointer :: values_ptr(:) + + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_get_vara_double(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + ! see read_var_r8 for usage comment + subroutine read_var_r4(this, varindex, starts, sizes, values) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + real(4), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed + ! EO parameters + include "netcdf.inc" + real(4), pointer :: values_ptr(:) + + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_get_vara_real(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + ! 
see read_var_r8 for usage comment + subroutine read_var_integer(this, varindex, starts, sizes, values) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + integer, intent(inout), target :: values(..) ! must be inout or the allocation might be screwed + ! EO parameters + include "netcdf.inc" + integer, pointer :: values_ptr(:) + + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_get_vara_int(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + ! retrieve a single value specified via the indices array + subroutine read_var1_r8(this, varindex, indices, value) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: indices + real(8), intent(out) :: value + ! EO parameters + include "netcdf.inc" + + call assert(size(indices) == size(this%vars(varindex)%dim_indices), __LINE__) + + call assert_nc(nf_get_var1_double(this%ncid, this%vars(varindex)%ncid, indices, value), __LINE__) + end subroutine + + + ! see read_var1_r8 for usage comment + subroutine read_var1_r4(this, varindex, indices, value) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: indices + real(4), intent(out) :: value + ! EO parameters + include "netcdf.inc" + + call assert(size(indices) == size(this%vars(varindex)%dim_indices), __LINE__) + + call assert_nc(nf_get_var1_real(this%ncid, this%vars(varindex)%ncid, indices, value), __LINE__) + end subroutine + + + ! 
see read_var1_r8 for usage comment + subroutine read_var1_integer(this, varindex, indices, value) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: indices + integer, intent(out) :: value + ! EO parameters + include "netcdf.inc" + + call assert(size(indices) == size(this%vars(varindex)%dim_indices), __LINE__) + + call assert_nc(nf_get_var1_int(this%ncid, this%vars(varindex)%ncid, indices, value), __LINE__) + end subroutine + + + subroutine open_write_create(this, filepath) + class(netcdf_file_type), target, intent(inout) :: this + character(len=*), intent(in) :: filepath + ! EO parameters + include "netcdf.inc" + integer cmode + integer i, ii + integer var_ndims + integer, allocatable :: var_dimids(:) + character(:), pointer :: att_name + character(:), pointer :: att_text + + this%filepath = filepath + + cmode = ior(nf_noclobber, ior(nf_netcdf4, nf_classic_model)) + call assert_nc( nf_create(filepath, cmode, this%ncid) , __LINE__) + + ! create our dims in the file + do i=1, size(this%dims) + call assert_nc( nf_def_dim(this%ncid, this%dims(i)%name, this%dims(i)%len, this%dims(i)%ncid) , __LINE__) + end do + + do i=1, size(this%gatts) + call this%gatts(i)%it%define_in_var(this%ncid, nf_global) + end do + + ! create our vars in the file + do i=1, size(this%vars) + var_ndims = size(this%vars(i)%dim_indices) + if(allocated(var_dimids)) deallocate(var_dimids) + allocate(var_dimids(var_ndims)) + do ii=1, var_ndims + var_dimids(ii) = this%dims( this%vars(i)%dim_indices(ii) )%ncid + end do + call assert_nc( nf_def_var(this%ncid, this%vars(i)%name, this%vars(i)%datatype, var_ndims, var_dimids, this%vars(i)%ncid) , __LINE__) + + do ii=1, this%vars(i)%atts_count + call this%vars(i)%atts(ii)%it%define_in_var(this%ncid, this%vars(i)%ncid) + end do + end do + + call assert_nc( nf_enddef(this%ncid), __LINE__ ) + end subroutine + + + ! 
open an existing file and prepare to write data to it + subroutine open_write_append(this, filepath) + class(netcdf_file_type), intent(inout) :: this + character(len=*), intent(in) :: filepath + ! EO parameters + include "netcdf.inc" + integer cmode + + this%filepath = filepath + + cmode = nf_write + call assert_nc( nf_open(filepath, cmode, this%ncid) , __LINE__) + + ! make sure that all our dims and vars exist in this file and get hold of them + call this%attach_dims_vars_to_file() + end subroutine + + + subroutine write_var_r8(this, varindex, starts, sizes, values) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + real(8), intent(in), target :: values(..) ! must be inout or the allocation might be screwed + ! EO parameters + include "netcdf.inc" + real(8), pointer :: values_ptr(:) + + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_put_vara_double(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + subroutine write_var_r4(this, varindex, starts, sizes, values) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + real(4), intent(in), target :: values(..) ! must be inout or the allocation might be screwed + ! 
EO parameters + include "netcdf.inc" + real(4), pointer :: values_ptr(:) + + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_put_vara_real(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + subroutine write_var_integer(this, varindex, starts, sizes, values) + use, intrinsic :: ISO_C_BINDING + class(netcdf_file_type), intent(in) :: this + integer, intent(in) :: varindex + integer, dimension(:) :: starts, sizes + integer, intent(in), target :: values(..) ! intent(in): only read here; assumed-rank, flattened via c_f_pointer below + ! EO parameters + include "netcdf.inc" + integer, pointer :: values_ptr(:) + + call assert(size(sizes) == size(starts), __LINE__) + call assert(size(starts) == size(this%vars(varindex)%dim_indices), __LINE__) + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_put_vara_int(this%ncid, this%vars(varindex)%ncid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + subroutine flush_file(this) + class(netcdf_file_type), intent(inout) :: this + ! EO parameters + include "netcdf.inc" + + call assert_nc( nf_sync(this%ncid), __LINE__ ) ! flush the file to disk + end subroutine + + + subroutine close_file(this) + ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same ncid + class(netcdf_file_type), intent(inout) :: this + ! EO parameters + include "netcdf.inc" + call assert_nc( nf_close(this%ncid) , __LINE__) + + this%filepath = "" + end subroutine + + + ! connect our dims and vars to their counterparts in the NetCDF file, bail out if they do not match + ! 
ignore any additional dims and vars the file might contain + subroutine attach_dims_vars_to_file(this) + class(netcdf_file_type), intent(inout) :: this + ! EO parameters + include "netcdf.inc" + integer i, ii + integer actual_len + integer actual_dimcount + integer, allocatable :: actual_dimids(:) + integer exp_dimid, act_dimid + integer actual_datatype + + do i=1, size(this%dims) + call assert_nc( nf_inq_dimid(this%ncid, this%dims(i)%name, this%dims(i)%ncid) , __LINE__) + call assert_nc( nf_inq_dimlen(this%ncid, this%dims(i)%ncid, actual_len) , __LINE__) + if(this%dims(i)%len .ne. nf_unlimited) call assert(this%dims(i)%len == actual_len, __LINE__) + end do + do i=1, size(this%vars) + call assert_nc( nf_inq_varid(this%ncid, this%vars(i)%name, this%vars(i)%ncid) , __LINE__) + ! see if this var has the expected datatype + call assert_nc( nf_inq_vartype(this%ncid, this%vars(i)%ncid, actual_datatype) , __LINE__) + call assert(this%vars(i)%datatype == actual_datatype, __LINE__) + ! see if this var has the expected dims + call assert_nc( nf_inq_varndims(this%ncid, this%vars(i)%ncid, actual_dimcount) , __LINE__) + call assert(size(this%vars(i)%dim_indices) == actual_dimcount, __LINE__) + if(allocated(actual_dimids)) deallocate(actual_dimids) + allocate(actual_dimids(actual_dimcount)) + call assert_nc( nf_inq_vardimid(this%ncid, this%vars(i)%ncid, actual_dimids) , __LINE__) + do ii=1, actual_dimcount + exp_dimid = this%dims( this%vars(i)%dim_indices(ii) )%ncid + call assert(exp_dimid == actual_dimids(ii), __LINE__) + end do + end do + end subroutine + + + subroutine assert(val, line) + logical, intent(in) :: val + integer, intent(in) :: line + ! EO parameters + if(.not. val) then + print *, "error in line ",line, __FILE__ + stop 1 + end if + end subroutine + + + subroutine assert_nc(status, line) + integer, intent(in) :: status + integer, intent(in) :: line + ! 
EO parameters + include "netcdf.inc" + if(status /= nf_noerr) then + print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status)) + stop 1 + endif + end subroutine + +end module diff --git a/src/io_netcdf_module.F90 b/src/io_netcdf_module.F90 new file mode 100644 index 000000000..1fe6f7cd9 --- /dev/null +++ b/src/io_netcdf_module.F90 @@ -0,0 +1,186 @@ +module io_netcdf_module + implicit none + public netcdf_variable_handle + private + + type netcdf_variable_handle + private + character(:), allocatable :: filepath + character(:), allocatable :: varname + integer fileid + integer varid + integer timedim_index + integer, allocatable :: varshape(:) + contains + procedure, public :: initialize, finalize, number_of_timesteps, number_of_dims, dimsize_at + generic, public :: read_values => read_values_r4,read_values_r8 + procedure, private :: open_netcdf_variable, read_values_r4, read_values_r8 + end type + + + contains + + + subroutine initialize(this, filepath, varname) + class(netcdf_variable_handle), intent(inout) :: this + character(len=*), intent(in) :: filepath + character(len=*), intent(in) :: varname + ! EO args + include "netcdf.inc" + integer mode + + this%filepath = filepath + this%varname = varname + + ! assert varshape is not allocated, i.e. initialize has not been called + call assert(.not. allocated(this%varshape), __LINE__) + call this%open_netcdf_variable(NF_NOWRITE) + + ! assume the last dimension for this variable is the time dimension (i.e. first in ncdump) + call assert(size(this%varshape) > 0, __LINE__) + this%timedim_index = size(this%varshape) + end subroutine + + + subroutine open_netcdf_variable(this, mode) + class(netcdf_variable_handle), intent(inout) :: this + integer, intent(in) :: mode + ! 
EO args + include "netcdf.inc" + integer var_dim_size + integer, allocatable, dimension(:) :: dimids + integer i + + call assert_nc( nf_open(this%filepath, mode, this%fileid) , __LINE__) + call assert_nc( nf_inq_varid(this%fileid, this%varname, this%varid) , __LINE__) + call assert_nc( nf_inq_varndims(this%fileid, this%varid, var_dim_size) , __LINE__) + allocate(dimids(var_dim_size)) + call assert_nc( nf_inq_vardimid(this%fileid, this%varid, dimids) , __LINE__) + + allocate(this%varshape(var_dim_size)) + do i=1, var_dim_size + call assert_nc( nf_inq_dimlen(this%fileid, dimids(i), this%varshape(i)) , __LINE__) + end do + end subroutine + + + subroutine finalize(this) + ! do not implicitly close the file (e.g. upon deallocation via destructor), as we might have a copy of this object with access to the same fileid + class(netcdf_variable_handle), intent(inout) :: this + ! EO args + include "netcdf.inc" + if(allocated(this%varshape)) then + call assert_nc( nf_close(this%fileid) , __LINE__) + end if + end subroutine + + + function number_of_timesteps(this) result(t) + class(netcdf_variable_handle), intent(in) :: this + integer t + ! EO args + t = this%varshape(this%timedim_index) + end function + + + function number_of_dims(this) result(d) + class(netcdf_variable_handle), intent(in) :: this + integer d + ! EO args + d = size(this%varshape) + end function + + + function dimsize_at(this,index) result(s) + class(netcdf_variable_handle), intent(in) :: this + integer, intent(in) :: index + integer s + ! EO args + call assert(index <= size(this%varshape), __LINE__) + s = this%varshape(index) + end function + + + subroutine read_values_r8(this, timeindex, values) + use io_netcdf_nf_interface + use, intrinsic :: ISO_C_BINDING + class(netcdf_variable_handle), intent(in) :: this + integer, intent(in) :: timeindex + real(8), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed + ! 
EO args + real(8), pointer :: values_ptr(:) + integer, allocatable, dimension(:) :: starts, sizes + + call read_values_preflight(this, timeindex, starts, sizes) + + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_get_vara_x(this%fileid, this%varid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + subroutine read_values_r4(this, timeindex, values) + use io_netcdf_nf_interface + use, intrinsic :: ISO_C_BINDING + class(netcdf_variable_handle), intent(in) :: this + integer, intent(in) :: timeindex + real(4), intent(inout), target :: values(..) ! must be inout or the allocation might be screwed + ! EO args + real(4), pointer :: values_ptr(:) + integer, allocatable, dimension(:) :: starts, sizes + + call read_values_preflight(this, timeindex, starts, sizes) + + call assert(product(sizes) == product(shape(values)), __LINE__) + + call c_f_pointer(c_loc(values), values_ptr, [product(shape(values))]) + call assert_nc(nf_get_vara_x(this%fileid, this%varid, starts, sizes, values_ptr), __LINE__) + end subroutine + + + subroutine read_values_preflight(this, timeindex, starts, sizes) + class(netcdf_variable_handle), intent(in) :: this + integer, intent(in) :: timeindex + ! EO args + + integer, allocatable, dimension(:) :: starts, sizes + + call assert(allocated(this%varshape), __LINE__) + + allocate(starts(size(this%varshape))) + allocate(sizes(size(this%varshape))) + + call assert(0 < timeindex, __LINE__) + call assert(timeindex <= this%number_of_timesteps(), __LINE__) + + starts = 1 + sizes = this%varshape + starts(this%timedim_index) = timeindex + sizes(this%timedim_index) = 1 !timeindex_last-timeindex_first+1 + end subroutine + + + subroutine assert_nc(status, line) + integer, intent(in) :: status + integer, intent(in) :: line + ! 
EO args + include "netcdf.inc" + if(status /= nf_noerr) then + print *, "error in line ",line, __FILE__, ' ', trim(nf_strerror(status)) + stop 1 + endif + end subroutine + + + subroutine assert(val, line) + logical, intent(in) :: val + integer, intent(in) :: line + ! EO args + if(.NOT. val) then + print *, "error in line ",line, __FILE__ + stop 1 + end if + end subroutine + +end module diff --git a/src/io_netcdf_nf_interface.F90 b/src/io_netcdf_nf_interface.F90 new file mode 100644 index 000000000..2c65de35d --- /dev/null +++ b/src/io_netcdf_nf_interface.F90 @@ -0,0 +1,47 @@ +module io_netcdf_nf_interface +implicit none + + interface + function nf_get_vara_double(ncid, varid, start, counts, dvals) result(status) + integer, intent(in) :: ncid, varid + integer, intent(in) :: start(*), counts(*) + real(8), intent(out) :: dvals(*) + integer status + end function + + function nf_get_vara_real(ncid, varid, start, counts, dvals) result(status) + integer, intent(in) :: ncid, varid + integer, intent(in) :: start(*), counts(*) + real(4), intent(out) :: dvals(*) + integer status + end function + end interface + + + interface nf_get_vara_x + procedure nf_get_vara_real, nf_get_vara_double + end interface + + + interface + function nf_put_vara_double(ncid, varid, start, counts, dvals) result(status) + integer, intent(in) :: ncid, varid + integer, intent(in) :: start(*), counts(*) + real(8), intent(in) :: dvals(*) + integer status + end function + + function nf_put_vara_real(ncid, varid, start, counts, dvals) result(status) + integer, intent(in) :: ncid, varid + integer, intent(in) :: start(*), counts(*) + real(4), intent(in) :: dvals(*) + integer status + end function + end interface + + + interface nf_put_vara_x + procedure nf_put_vara_real, nf_put_vara_double + end interface + +end module diff --git a/src/io_netcdf_workaround_module.F90 b/src/io_netcdf_workaround_module.F90 index 09efb7b3e..17271a9cb 100644 --- a/src/io_netcdf_workaround_module.F90 +++ 
b/src/io_netcdf_workaround_module.F90 @@ -6,18 +6,20 @@ module io_netcdf_workaround_module contains - integer function next_io_rank(communicator, async_netcdf_allowed) result(result) - use g_PARSUP + integer function next_io_rank(communicator, async_netcdf_allowed, partit) result(result) + USE MOD_PARTIT + USE MOD_PARSUP use mpi_topology_module - integer, intent(in) :: communicator - logical, intent(out) :: async_netcdf_allowed + integer, intent(in) :: communicator + logical, intent(out) :: async_netcdf_allowed + type(t_partit), intent(in), target :: partit ! EO args integer rank_use_count integer rank result = next_io_rank_helper(communicator, rank_use_count) if(rank_use_count > 1) then - if(mype == SEQUENTIAL_IO_RANK) print *,"rejecting additional async NetCDF for process:",result, "use count:", rank_use_count, "falling back to sequential I/O on process ",SEQUENTIAL_IO_RANK + if(partit%mype == SEQUENTIAL_IO_RANK) print *,"rejecting additional async NetCDF for process:",result, "use count:", rank_use_count, "falling back to sequential I/O on process ",SEQUENTIAL_IO_RANK result = SEQUENTIAL_IO_RANK async_netcdf_allowed = .false. else diff --git a/src/io_restart.F90 b/src/io_restart.F90 index 5f6d0d674..841cfc474 100644 --- a/src/io_restart.F90 +++ b/src/io_restart.F90 @@ -1,136 +1,90 @@ MODULE io_RESTART - use g_config + use restart_file_group_module + use restart_derivedtype_module use g_clock - use g_parsup - use g_comm_auto - use mod_mesh use o_arrays - use i_arrays use g_cvmix_tke - use g_cvmix_idemix + use MOD_TRACER + use MOD_ICE + use MOD_DYN + use MOD_MESH + use MOD_PARTIT + use MOD_PARSUP + use fortran_utils + implicit none -#include "netcdf.inc" -! -!-------------------------------------------------------------------------------------------- -! - type nc_dims - integer :: size - character(100) :: name - integer :: code - end type nc_dims -! -!-------------------------------------------------------------------------------------------- -! 
- type nc_vars - character(100) :: name - integer :: code - character(500) :: longname - character(100) :: units - integer :: ndim - integer :: dims(2) !<=2; assume there are no variables with dimension more than 2xNLxT - real(kind=WP), pointer :: pt1(:), pt2(:,:) - end type nc_vars -! -!-------------------------------------------------------------------------------------------- -! - type nc_file - character(500) :: filename - type(nc_dims), allocatable, dimension(:) :: dim - type(nc_vars), allocatable, dimension(:) :: var - integer :: ndim=0, nvar=0 - integer :: rec, Tid, Iid - integer :: ncid - integer :: rec_count=0 - integer :: error_status(250), error_count - logical :: is_in_use=.false. - end type nc_file -! -!-------------------------------------------------------------------------------------------- -! - type type_id - integer :: nd, el, nz, nz1, T, rec, iter - end type type_id -! -!-------------------------------------------------------------------------------------------- -! id will keep the IDs of all required dimentions and variables - type(nc_file), save :: oid, iid - integer, save :: globalstep=0 - type(nc_file), save :: ip_id + public :: restart, finalize_restart + private + + integer, save :: globalstep=0 ! todo: remove this from module scope as it will mess things up if we use async read/write from the same process real(kind=WP) :: ctime !current time in seconds from the beginning of the year - PRIVATE - PUBLIC :: restart, oid, iid - PUBLIC :: ip_id, def_dim, def_variable_1d, def_variable_2d -! -!-------------------------------------------------------------------------------------------- -! generic interface was required to associate variables of unknown rank with the pointers of the same rank -! this allows for automatic streaming of associated variables into the netcdf file - INTERFACE def_variable - MODULE PROCEDURE def_variable_1d, def_variable_2d - END INTERFACE -! 
-!-------------------------------------------------------------------------------------------- -! + type(restart_file_group) , save :: oce_files + type(restart_file_group) , save :: ice_files + character(:), allocatable, save :: oce_path + character(:), allocatable, save :: ice_path + + character(:), allocatable, save :: raw_restart_dirpath + character(:), allocatable, save :: raw_restart_infopath + character(:), allocatable, save :: bin_restart_dirpath + character(:), allocatable, save :: bin_restart_infopath + integer, parameter :: RAW_RESTART_METADATA_RANK = 0 + + contains ! !-------------------------------------------------------------------------------------------- -! ini_ocean_io initializes oid datatype which contains information of all variables need to be written into +! ini_ocean_io initializes ocean_file datatype which contains information of all variables need to be written into ! the ocean restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ocean_io(year, mesh) - implicit none - +subroutine ini_ocean_io(year, dynamics, tracers, partit, mesh) integer, intent(in) :: year - integer :: ncid, j - integer :: varid + integer :: j character(500) :: longname - character(500) :: filename character(500) :: trname, units character(4) :: cyear - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_mesh), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), target :: tracers + type(t_dyn), target :: dynamics + logical, save :: has_been_called = .false. write(cyear,'(i4)') year - ! create an ocean restart file; serial output implemented so far - oid%filename=trim(ResultPath)//trim(runid)//'.'//cyear//'.oce.restart.nc' - if (oid%is_in_use) return - oid%is_in_use=.true. 
- call def_dim(oid, 'node', nod2d) - call def_dim(oid, 'elem', elem2d) - call def_dim(oid, 'nz_1', nl-1) - call def_dim(oid, 'nz', nl) + oce_path = trim(ResultPath)//trim(runid)//'.'//cyear//'.oce.restart.nc' + + if(has_been_called) return + has_been_called = .true. !=========================================================================== !===================== Definition part ===================================== !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ !___SSH_____________________________________________________________________ - call def_variable(oid, 'ssh', (/nod2D/), 'sea surface elevation', 'm', eta_n); + call oce_files%def_node_var('ssh', 'sea surface elevation', 'm', dynamics%eta_n, mesh, partit) !___ALE related fields______________________________________________________ - call def_variable(oid, 'hbar', (/nod2D/), 'ALE surface elevation', 'm', hbar); -!!PS call def_variable(oid, 'ssh_rhs', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs); - call def_variable(oid, 'ssh_rhs_old', (/nod2D/), 'RHS for the elevation', '?', ssh_rhs_old); - call def_variable(oid, 'hnode', (/nl-1, nod2D/), 'nodal layer thickness', 'm', hnode); + call oce_files%def_node_var('hbar', 'ALE surface elevation', 'm', mesh%hbar, mesh, partit) +!!PS call oce_files%def_node_var('ssh_rhs', 'RHS for the elevation', '?', ssh_rhs, mesh, partit) + call oce_files%def_node_var('ssh_rhs_old', 'RHS for the elevation', '?', dynamics%ssh_rhs_old, mesh, partit) + call oce_files%def_node_var('hnode', 'nodal layer thickness', 'm', mesh%hnode, mesh, partit) !___Define the netCDF variables for 3D fields_______________________________ - call def_variable(oid, 'u', (/nl-1, elem2D/), 'zonal velocity', 'm/s', UV(1,:,:)); - call def_variable(oid, 'v', (/nl-1, elem2D/), 'meridional velocity', 'm/s', UV(2,:,:)); - call def_variable(oid, 'urhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for u', 'm/s', 
UV_rhsAB(1,:,:)); - call def_variable(oid, 'vrhs_AB', (/nl-1, elem2D/), 'Adams–Bashforth for v', 'm/s', UV_rhsAB(2,:,:)); + call oce_files%def_elem_var('u', 'zonal velocity', 'm/s', dynamics%uv(1,:,:), mesh, partit) + call oce_files%def_elem_var('v', 'meridional velocity', 'm/s', dynamics%uv(2,:,:), mesh, partit) + call oce_files%def_elem_var('urhs_AB', 'Adams–Bashforth for u', 'm/s', dynamics%uv_rhsAB(1,:,:), mesh, partit) + call oce_files%def_elem_var('vrhs_AB', 'Adams–Bashforth for v', 'm/s', dynamics%uv_rhsAB(2,:,:), mesh, partit) !___Save restart variables for TKE and IDEMIX_________________________________ if (trim(mix_scheme)=='cvmix_TKE' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then - call def_variable(oid, 'tke', (/nl, nod2d/), 'Turbulent Kinetic Energy', 'm2/s2', tke(:,:)); + call oce_files%def_node_var('tke', 'Turbulent Kinetic Energy', 'm2/s2', tke(:,:), mesh, partit) endif if (trim(mix_scheme)=='cvmix_IDEMIX' .or. trim(mix_scheme)=='cvmix_TKE+IDEMIX') then - call def_variable(oid, 'iwe', (/nl, nod2d/), 'Internal Wave eneryy', 'm2/s2', tke(:,:)); + call oce_files%def_node_var('iwe', 'Internal Wave eneryy', 'm2/s2', tke(:,:), mesh, partit) endif - if (visc_option==8) then - call def_variable(oid, 'uke', (/nl-1, elem2D/), 'unresolved kinetic energy', 'm2/s2', uke(:,:)); - call def_variable(oid, 'uke_rhs', (/nl-1, elem2D/), 'unresolved kinetic energy rhs', 'm2/s2', uke_rhs(:,:)); + if (dynamics%opt_visc==8) then + call oce_files%def_elem_var('uke', 'unresolved kinetic energy', 'm2/s2', uke(:,:), mesh, partit) + call oce_files%def_elem_var('uke_rhs', 'unresolved kinetic energy rhs', 'm2/s2', uke_rhs(:,:), mesh, partit) endif - do j=1,num_tracers + do j=1,tracers%num_tracers SELECT CASE (j) CASE(1) trname='temp' @@ -145,616 +99,784 @@ subroutine ini_ocean_io(year, mesh) write(longname,'(A15,i1)') 'passive tracer ', j units='none' END SELECT - call def_variable(oid, trim(trname), (/nl-1, nod2D/), trim(longname), trim(units), tr_arr(:,:,j)); + call 
oce_files%def_node_var(trim(trname), trim(longname), trim(units), tracers%data(j)%values(:,:), mesh, partit) longname=trim(longname)//', Adams–Bashforth' - call def_variable(oid, trim(trname)//'_AB',(/nl-1, nod2D/), trim(longname), trim(units), tr_arr_old(:,:,j)); + call oce_files%def_node_var(trim(trname)//'_AB', trim(longname), trim(units), tracers%data(j)%valuesAB(:,:), mesh, partit) end do - call def_variable(oid, 'w', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel); - call def_variable(oid, 'w_expl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_e); - call def_variable(oid, 'w_impl', (/nl, nod2D/), 'vertical velocity', 'm/s', Wvel_i); + call oce_files%def_node_var('w', 'vertical velocity', 'm/s', dynamics%w, mesh, partit) + call oce_files%def_node_var('w_expl', 'vertical velocity', 'm/s', dynamics%w_e, mesh, partit) + call oce_files%def_node_var('w_impl', 'vertical velocity', 'm/s', dynamics%w_i, mesh, partit) end subroutine ini_ocean_io ! !-------------------------------------------------------------------------------------------- -! ini_ice_io initializes iid datatype which contains information of all variables need to be written into +! ini_ice_io initializes ice_file datatype which contains information of all variables need to be written into ! the ice restart file. This is the only place need to be modified if a new variable is added! -subroutine ini_ice_io(year, mesh) - implicit none - +subroutine ini_ice_io(year, ice, partit, mesh) integer, intent(in) :: year - integer :: ncid, j - integer :: varid - character(500) :: longname - character(500) :: filename - character(500) :: trname, units character(4) :: cyear type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_partit), intent(inout), target :: partit + type(t_ice), target :: ice + logical, save :: has_been_called = .false. write(cyear,'(i4)') year - ! 
create an ocean restart file; serial output implemented so far - iid%filename=trim(ResultPath)//trim(runid)//'.'//cyear//'.ice.restart.nc' - if (iid%is_in_use) return - iid%is_in_use=.true. - call def_dim(iid, 'node', nod2d) + ice_path = trim(ResultPath)//trim(runid)//'.'//cyear//'.ice.restart.nc' + + if(has_been_called) return + has_been_called = .true. !=========================================================================== !===================== Definition part ===================================== !=========================================================================== !___Define the netCDF variables for 2D fields_______________________________ - call def_variable(iid, 'area', (/nod2D/), 'ice concentration [0 to 1]', '%', a_ice); - call def_variable(iid, 'hice', (/nod2D/), 'effective ice thickness', 'm', m_ice); - call def_variable(iid, 'hsnow', (/nod2D/), 'effective snow thickness', 'm', m_snow); - call def_variable(iid, 'uice', (/nod2D/), 'zonal velocity', 'm/s', u_ice); - call def_variable(iid, 'vice', (/nod2D/), 'meridional velocity', 'm', v_ice); + call ice_files%def_node_var('area', 'ice concentration [0 to 1]', '%', ice%data(1)%values(:), mesh, partit) + call ice_files%def_node_var('hice', 'effective ice thickness', 'm', ice%data(2)%values(:), mesh, partit) + call ice_files%def_node_var('hsnow', 'effective snow thickness', 'm', ice%data(3)%values(:), mesh, partit) + call ice_files%def_node_var('uice', 'zonal velocity', 'm/s', ice%uice, mesh, partit) + call ice_files%def_node_var('vice', 'meridional velocity', 'm', ice%vice, mesh, partit) #if defined (__oifs) - call def_variable(iid, 'ice_albedo', (/nod2D/), 'ice albedo', '-', ice_alb); - call def_variable(iid, 'ice_temp',(/nod2D/), 'ice surface temperature', 'K', ice_temp); + call ice_files%def_node_var_optional('ice_albedo', 'ice albedo', '-', ice%atmcoupl%ice_alb, mesh, partit) + call ice_files%def_node_var_optional('ice_temp', 'ice surface temperature', 'K', ice%data(4)%values, mesh, partit) 
#endif /* (__oifs) */ end subroutine ini_ice_io ! !-------------------------------------------------------------------------------------------- ! -subroutine restart(istep, l_write, l_read, mesh) +subroutine restart(istep, l_read, which_readr, ice, dynamics, tracers, partit, mesh) #if defined(__icepack) - use icedrv_main, only: init_restart_icepack + icepack restart not merged here ! produce a compiler error if USE_ICEPACK=ON; todo: merge icepack restart from 68d8b8b #endif + use fortran_utils implicit none ! this is the main restart subroutine - ! if l_write is TRUE writing restart file will be forced ! if l_read is TRUE the restart file will be read integer :: istep - logical :: l_write, l_read - logical :: is_restart - integer :: mpierr - type(t_mesh), intent(in) , target :: mesh + logical :: l_read + logical :: is_portable_restart_write, is_raw_restart_write, is_bin_restart_write + type(t_mesh) , intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics + type(t_ice) , intent(inout), target :: ice + logical rawfiles_exist, binfiles_exist + logical, save :: initialized_raw = .false. + logical, save :: initialized_bin = .false. + integer mpierr + + !which_readr = ... + ! 0 ... read netcdf restart + ! 1 ... read dump file restart (binary) + ! 2 ... read derived type restart (binary) + integer, intent(out):: which_readr + + integer :: cstep + !_____________________________________________________________________________ + ! initialize directory for core dump restart + if(.not. initialized_raw) then + initialized_raw = .true. + raw_restart_dirpath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(partit%npes) + raw_restart_infopath = trim(ResultPath)//"/fesom_raw_restart/np"//int_to_txt(partit%npes)//".info" + if(raw_restart_length_unit /= "off") then + if(partit%mype == RAW_RESTART_METADATA_RANK) then + ! 
execute_command_line with mkdir sometimes fails, use a custom implementation around mkdir from C instead + call mkdir(trim(ResultPath)//"/fesom_raw_restart") ! we have no mkdir -p, create the intermediate dirs separately + call mkdir(raw_restart_dirpath) + end if + call MPI_Barrier(partit%MPI_COMM_FESOM, mpierr) ! make sure the dir has been created before we continue... + end if + end if + !_____________________________________________________________________________ + ! initialize directory for derived type binary restart + if(.not. initialized_bin) then + initialized_bin = .true. + bin_restart_dirpath = trim(ResultPath)//"/fesom_bin_restart/np"//int_to_txt(partit%npes) + bin_restart_infopath = trim(ResultPath)//"/fesom_bin_restart/np"//int_to_txt(partit%npes)//".info" + if(bin_restart_length_unit /= "off") then + if(partit%mype == RAW_RESTART_METADATA_RANK) then + ! execute_command_line with mkdir sometimes fails, use a custom implementation around mkdir from C instead + call mkdir(trim(ResultPath)//"/fesom_bin_restart") ! we have no mkdir -p, create the intermediate dirs separately + call mkdir(bin_restart_dirpath) + end if + call MPI_Barrier(partit%MPI_COMM_FESOM, mpierr) ! make sure the dir has been created before we continue... + end if + end if + + !_____________________________________________________________________________ + ! compute current time based on what is written in fesom.clock file ctime=timeold+(dayold-1.)*86400 + + !_____________________________________________________________________________ + ! initialise files for netcdf restart if l_read==TRUE --> the restart file + ! will be read if (.not. 
l_read) then - call ini_ocean_io(yearnew, mesh) - if (use_ice) call ini_ice_io (yearnew, mesh) -#if defined(__icepack) - if (use_ice) call init_restart_icepack(yearnew, mesh) -#endif + call ini_ocean_io(yearnew, dynamics, tracers, partit, mesh) + if (use_ice) call ini_ice_io (yearnew, ice, partit, mesh) else - call ini_ocean_io(yearold, mesh) - if (use_ice) call ini_ice_io (yearold, mesh) -#if defined(__icepack) - if (use_ice) call init_restart_icepack(yearold, mesh) -#endif + call ini_ocean_io(yearold, dynamics, tracers, partit, mesh) + if (use_ice) call ini_ice_io (yearold, ice, partit, mesh) end if + !___READING OF RESTART________________________________________________________ + ! should restart files be readed --> see r_restart in gen_modules_clock.F90 if (l_read) then - call assoc_ids(oid); call was_error(oid) - call read_restart(oid, mesh); call was_error(oid) - if (use_ice) then - call assoc_ids(iid); call was_error(iid) - call read_restart(iid, mesh); call was_error(iid) -#if defined(__icepack) - call assoc_ids(ip_id); call was_error(ip_id) - call read_restart(ip_id, mesh); call was_error(ip_id) -#endif + ! determine if we can load raw restart dump files --> check if *.info file for + ! raw restarts exist --> if info file exist also the rest must exist --> so + ! core dump restart is readable + if(partit%mype == RAW_RESTART_METADATA_RANK) then + inquire(file=raw_restart_infopath, exist=rawfiles_exist) + end if + call MPI_Bcast(rawfiles_exist, 1, MPI_LOGICAL, RAW_RESTART_METADATA_RANK, partit%MPI_COMM_FESOM, mpierr) + + ! check if folder for derived type binary restarts exist + if(partit%mype == RAW_RESTART_METADATA_RANK) then + inquire(file=bin_restart_infopath, exist=binfiles_exist) + end if + call MPI_Bcast(binfiles_exist, 1, MPI_LOGICAL, RAW_RESTART_METADATA_RANK, partit%MPI_COMM_FESOM, mpierr) + + !___________________________________________________________________________ + ! 
read core dump file restart + if(rawfiles_exist) then + which_readr = 1 + call read_all_raw_restarts(partit%MPI_COMM_FESOM, partit%mype) + + !___________________________________________________________________________ + ! read derived type binary file restart + elseif(binfiles_exist .and. bin_restart_length_unit /= "off") then + which_readr = 2 + if (use_ice) then + call read_all_bin_restarts(bin_restart_dirpath, & + partit = partit, & + mesh = mesh, & + ice = ice, & + dynamics = dynamics, & + tracers = tracers ) + else + call read_all_bin_restarts(bin_restart_dirpath, & + partit = partit, & + mesh = mesh, & + dynamics = dynamics, & + tracers = tracers ) + end if + !___________________________________________________________________________ + ! read netcdf file restart + else + which_readr = 0 + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read restarts from netcdf file: ocean'//achar(27)//'[0m' + call read_restart(oce_path, oce_files, partit%MPI_COMM_FESOM, partit%mype) + if (use_ice) then + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read restarts from netcdf file: ice'//achar(27)//'[0m' + call read_restart(ice_path, ice_files, partit%MPI_COMM_FESOM, partit%mype) + end if + + ! immediately create a raw core dump restart + if(raw_restart_length_unit /= "off") then + call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) + end if + + ! immediately create a derived type binary restart + if(bin_restart_length_unit /= "off") then + ! current (total) model step --> cstep = globalstep+istep + call write_all_bin_restarts((/globalstep+istep, int(ctime), yearnew/), & + bin_restart_dirpath, & + bin_restart_infopath, & + partit, & + mesh, & + ice, & + dynamics, & + tracers ) + end if end if end if if (istep==0) return - - !check whether restart will be written - is_restart=.false. 
- - if (restart_length_unit.eq.'y') then - call annual_event(is_restart) - else if (restart_length_unit.eq.'m') then - call monthly_event(is_restart) - else if (restart_length_unit.eq.'d') then - call daily_event(is_restart, restart_length) - else if (restart_length_unit.eq.'h') then - call hourly_event(is_restart, restart_length) - else if (restart_length_unit.eq.'s') then - call step_event(is_restart, istep, restart_length) + + !___WRITING OF RESTART________________________________________________________ + ! check whether restart will be written + ! --> should write netcdf restart: True/False + is_portable_restart_write = is_due(trim(restart_length_unit), restart_length, istep) + + ! --> should write core dump restart: True/False + if(is_portable_restart_write .and. (raw_restart_length_unit /= "off")) then + is_raw_restart_write = .true. ! always write a raw restart together with the portable restart (unless raw restarts are off) else - write(*,*) 'You did not specify a supported outputflag.' - write(*,*) 'The program will stop to give you opportunity to do it.' - call par_ex(1) - stop - endif + is_raw_restart_write = is_due(trim(raw_restart_length_unit), raw_restart_length, istep) + end if + + ! --> should write derived type binary restart: True/False + if(is_portable_restart_write .and. (bin_restart_length_unit /= "off")) then + is_bin_restart_write = .true. ! always write a binary restart together with the portable restart (unless raw restarts are off) + else + is_bin_restart_write = is_due(trim(bin_restart_length_unit), bin_restart_length, istep) + end if - if (l_write) is_restart=.true. + !_____________________________________________________________________________ + ! finally write restart for netcdf, core dump and derived type binary + ! write netcdf restart + if(is_portable_restart_write) then +! if(partit%mype==0) write(*,*)'Do output (netCDF, restart) ...' 
+ if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> write restarts to netcdf file: ocean'//achar(27)//'[0m' + call write_restart(oce_path, oce_files, istep) + if(use_ice) then + if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> write restarts to netcdf file: ice'//achar(27)//'[0m' + call write_restart(ice_path, ice_files, istep) + end if + end if - if (.not. is_restart) return + ! write core dump + if(is_raw_restart_write) then + call write_all_raw_restarts(istep, partit%MPI_COMM_FESOM, partit%mype) + end if - ! write restart - if(mype==0) write(*,*)'Do output (netCDF, restart) ...' - call assoc_ids(oid); call was_error(oid) - call write_restart(oid, istep, mesh); call was_error(oid) - if (use_ice) then - call assoc_ids(iid); call was_error(iid) - call write_restart(iid, istep, mesh); call was_error(iid) -#if defined(__icepack) - call assoc_ids(ip_id); call was_error(ip_id) - call write_restart(ip_id, istep, mesh); call was_error(ip_id) -#endif + ! write derived type binary + if(is_bin_restart_write) then + ! current (total) model step --> cstep = globalstep+istep + call write_all_bin_restarts((/globalstep+istep, int(ctime), yearnew/), & + bin_restart_dirpath, & + bin_restart_infopath, & + partit, & + mesh, & + ice, & + dynamics, & + tracers ) end if - + ! actualize clock file to latest restart point - if (mype==0) then - write(*,*) ' --> actualize clock file to latest restart point' - call clock_finish + if (partit%mype==0) then + if(is_portable_restart_write .or. is_raw_restart_write .or. is_bin_restart_write) then + write(*,*) ' --> actualize clock file to latest restart point' + call clock_finish + end if end if end subroutine restart ! -!-------------------------------------------------------------------------------------------- ! 
-subroutine create_new_file(id) - implicit none - - type(nc_file), intent(inout) :: id - integer :: c, j - integer :: n, k, l, kdim, dimid(4) - character(2000) :: att_text - ! Serial output implemented so far - if (mype/=0) return - c=1 - id%error_status=0 - ! create an ocean output file - write(*,*) 'initializing restart file ', trim(id%filename) - id%error_status(c) = nf_create(id%filename, IOR(NF_NOCLOBBER,IOR(NF_NETCDF4,NF_CLASSIC_MODEL)), id%ncid); c=c+1 - - do j=1, id%ndim -!___Create mesh related dimentions__________________________________________ - id%error_status(c) = nf_def_dim(id%ncid, id%dim(j)%name, id%dim(j)%size, id%dim(j)%code ); c=c+1 - end do +!_______________________________________________________________________________ +subroutine write_restart(path, filegroup, istep) + use fortran_utils + character(len=*), intent(in) :: path + type(restart_file_group), intent(inout) :: filegroup + integer, intent(in) :: istep + ! EO parameters + integer cstep + integer i + character(:), allocatable :: dirpath + character(:), allocatable :: filepath + logical file_exists + + cstep = globalstep+istep + + do i=1, filegroup%nfiles + call filegroup%files(i)%join() ! join the previous write (if required) + + if(filegroup%files(i)%is_iorank()) then + if(filegroup%files(i)%is_attached()) call filegroup%files(i)%close_file() ! close the file from previous write + + dirpath = path(1:len(path)-3) ! chop of the ".nc" suffix + filepath = dirpath//"/"//filegroup%files(i)%varname//".nc" + if(filegroup%files(i)%path == "" .or. (.not. filegroup%files(i)%must_exist_on_read)) then + ! the path to an existing restart file is not set in read_restart if we had a restart from a raw restart + ! OR we might have skipped the file when reading restarts and it does not exist at all + inquire(file=filepath, exist=file_exists) + if(file_exists) then + filegroup%files(i)%path = filepath + else if(.not. 
filegroup%files(i)%must_exist_on_read) then + filegroup%files(i)%path = "" + end if + end if + if(filegroup%files(i)%path .ne. filepath) then + ! execute_command_line with mkdir sometimes fails, use a custom implementation around mkdir from C instead + call mkdir(dirpath) + filegroup%files(i)%path = filepath + call filegroup%files(i)%open_write_create(filegroup%files(i)%path) + else + call filegroup%files(i)%open_write_append(filegroup%files(i)%path) ! todo: keep the file open between writes + end if + + write(*,*) 'writing restart record ', filegroup%files(i)%rec_count()+1, ' to ', filegroup%files(i)%path + call filegroup%files(i)%write_var(filegroup%files(i)%iter_varindex, [filegroup%files(i)%rec_count()+1], [1], [cstep]) + ! todo: write time via the fesom_file_type + call filegroup%files(i)%write_var(filegroup%files(i)%time_varindex(), [filegroup%files(i)%rec_count()+1], [1], [ctime]) + end if -!___Create time related dimentions__________________________________________ - id%error_status(c) = nf_def_dim(id%ncid, 'time', NF_UNLIMITED, id%rec); c=c+1 -!___Define the time and iteration variables_________________________________ - id%error_status(c) = nf_def_var(id%ncid, 'time', NF_DOUBLE, 1, id%rec, id%tID); c=c+1 - id%error_status(c) = nf_def_var(id%ncid, 'iter', NF_INT, 1, id%rec, id%iID); c=c+1 - - - att_text='time' - id%error_status(c) = nf_put_att_text(id%ncid, id%tID, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 - write(att_text, '(a14,I4.4,a1,I2.2,a1,I2.2,a6)') 'seconds since ', yearold, '-', 1, '-', 1, ' 0:0:0' - id%error_status(c) = nf_put_att_text(id%ncid, id%tID, 'units', len_trim(att_text), trim(att_text)); c=c+1 - - att_text='iteration_count' - id%error_status(c) = nf_put_att_text(id%ncid, id%iID, 'long_name', len_trim(att_text), trim(att_text)); c=c+1 - - do j=1, id%nvar -!___associate physical dimension with the netcdf IDs________________________ - n=id%var(j)%ndim ! 
shape size of the variable (exluding time) - do k=1, n - !k_th dimension of the variable - kdim=id%var(j)%dims(k) - do l=1, id%ndim ! list all defined dimensions - if (kdim==id%dim(l)%size) dimid(k)=id%dim(l)%code - end do - !write(*,*) "j",j,kdim, ' -> ', dimid(k) - end do - id%error_status(c) = nf_def_var(id%ncid, trim(id%var(j)%name), NF_DOUBLE, id%var(j)%ndim+1, (/dimid(1:n), id%rec/), id%var(j)%code); c=c+1 - !if (n==1) then - ! id%error_status(c)=nf_def_var_chunking(id%ncid, id%var(j)%code, NF_CHUNKED, (/1/)); c=c+1 - if (n==2) then - id%error_status(c)=nf_def_var_chunking(id%ncid, id%var(j)%code, NF_CHUNKED, (/1, id%dim(1)%size/)); ! c=c+1 - end if - id%error_status(c)=nf_put_att_text(id%ncid, id%var(j)%code, 'description', len_trim(id%var(j)%longname), id%var(j)%longname); c=c+1 - id%error_status(c)=nf_put_att_text(id%ncid, id%var(j)%code, 'units', len_trim(id%var(j)%units), id%var(j)%units); c=c+1 + call filegroup%files(i)%async_gather_and_write_variables() end do - - id%error_status(c)=nf_close(id%ncid); c=c+1 - id%error_count=c-1 -end subroutine create_new_file + +end subroutine ! -!-------------------------------------------------------------------------------------------- ! -subroutine def_dim(id, name, ndim) - implicit none - type(nc_file), intent(inout) :: id - character(len=*), intent(in) :: name - integer, intent(in) :: ndim - type(nc_dims), allocatable, dimension(:) :: temp - - if (id%ndim > 0) then - ! create temporal dimension - allocate(temp(id%ndim)); temp=id%dim - ! deallocate the input data array - deallocate(id%dim) - ! then reallocate - id%ndim=id%ndim+1 - allocate(id%dim(id%ndim)) - ! restore the original data - id%dim(1:id%ndim-1)=temp - deallocate(temp) - else - ! 
first dimension in a file - id%ndim=1 - allocate(id%dim(id%ndim)) - end if - id%dim(id%ndim)%name=trim(name) - id%dim(id%ndim)%size=ndim -end subroutine def_dim +!_______________________________________________________________________________ +subroutine write_all_raw_restarts(istep, mpicomm, mype) + integer, intent(in):: istep + integer, intent(in) :: mpicomm + integer, intent(in) :: mype + ! EO parameters + integer cstep + integer fileunit + + open(newunit = fileunit, file = raw_restart_dirpath//'/'//mpirank_to_txt(mpicomm)//'.dump', form = 'unformatted') + call write_raw_restart_group(oce_files, fileunit) + if(use_ice) call write_raw_restart_group(ice_files, fileunit) + close(fileunit) + + if(mype == RAW_RESTART_METADATA_RANK) then + print *,"writing raw restart to "//raw_restart_dirpath + ! store metadata about the raw restart + cstep = globalstep+istep + open(newunit = fileunit, file = raw_restart_infopath) + write(fileunit, '(g0)') cstep + write(fileunit, '(g0)') ctime + write(fileunit, '(2(g0))') "! year: ",yearnew + write(fileunit, '(3(g0))') "! oce: ", oce_files%nfiles, " variables" + if(use_ice) write(fileunit, '(3(g0))') "! ice: ", ice_files%nfiles, " variables" + close(fileunit) + end if +end subroutine ! -!-------------------------------------------------------------------------------------------- ! -subroutine def_variable_1d(id, name, dims, longname, units, data) - implicit none - type(nc_file), intent(inout) :: id - character(len=*), intent(in) :: name - integer, intent(in) :: dims(1) - character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: data(:) - integer :: c - type(nc_vars), allocatable, dimension(:) :: temp - - if (id%nvar > 0) then - ! create temporal dimension - allocate(temp(id%nvar)); temp=id%var - ! deallocate the input data array - deallocate(id%var) - ! then reallocate - id%nvar=id%nvar+1 - allocate(id%var(id%nvar)) - ! 
restore the original data - id%var(1:id%nvar-1)=temp - deallocate(temp) - else - ! first dimension in a file - id%nvar=1 - allocate(id%var(id%nvar)) - end if - id%var(id%nvar)%name=trim(name) - id%var(id%nvar)%longname=trim(longname) - id%var(id%nvar)%units=trim(units) - id%var(id%nvar)%ndim=1 - id%var(id%nvar)%dims(1)=dims(1) - id%var(id%nvar)%pt1=>data -end subroutine def_variable_1d +!_______________________________________________________________________________ +subroutine write_raw_restart_group(filegroup, fileunit) + type(restart_file_group), intent(inout) :: filegroup + integer, intent(in) :: fileunit + ! EO parameters + integer i + + do i=1, filegroup%nfiles + call filegroup%files(i)%write_variables_raw(fileunit) + end do +end subroutine +! ! ! +! ! ! +! ! !_______________________________________________________________________________ +! ! subroutine write_all_bin_restarts(istep, ice, dynamics, tracers, partit, mesh) +! ! integer, intent(in) :: istep +! ! type(t_ice) , target, intent(in) :: ice +! ! type(t_dyn) , target, intent(in) :: dynamics +! ! type(t_tracer), target, intent(in) :: tracers +! ! type(t_partit), target, intent(in) :: partit +! ! type(t_mesh) , target, intent(in) :: mesh +! ! +! ! ! EO parameters +! ! integer cstep +! ! integer fileunit, fileunit_i +! ! +! ! !___________________________________________________________________________ +! ! ! write info file +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! print *, achar(27)//'[1;33m'//' --> writing derived type binary restarts to '//bin_restart_dirpath//achar(27)//'[0m' +! ! ! store metadata about the raw restart +! ! cstep = globalstep+istep +! ! fileunit_i = 299 +! ! open(newunit = fileunit_i, file = bin_restart_infopath) +! ! write(fileunit_i, '(g0)') cstep +! ! write(fileunit_i, '(g0)') ctime +! ! write(fileunit_i, '(2(g0))') "! year: ",yearnew +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! mesh derived type +! ! 
fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = bin_restart_dirpath//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'replace', & +! ! form = 'unformatted') +! ! write(fileunit) mesh +! ! close(fileunit) +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! write(fileunit_i, '(1(g0))') "! t_mesh" +! ! print *, achar(27)//'[33m'//' > write derived type t_mesh'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! partit derived type +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = bin_restart_dirpath//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'replace', & +! ! form = 'unformatted') +! ! write(fileunit) partit +! ! close(fileunit) +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! write(fileunit_i, '(1(g0))') "! t_partit" +! ! print *, achar(27)//'[33m'//' > write derived type t_partit'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! tracer derived type +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = bin_restart_dirpath//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'replace', & +! ! form = 'unformatted') +! ! write(fileunit) tracers +! ! close(fileunit) +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! write(fileunit_i, '(1(g0))') "! t_tracer" +! ! print *, achar(27)//'[33m'//' > write derived type t_tracer'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! dynamics derived type +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = bin_restart_dirpath//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'replace', & +! ! form = 'unformatted') +! ! write(fileunit) dynamics +! ! close(fileunit) +! ! 
if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! write(fileunit_i, '(1(g0))') "! t_dynamics" +! ! print *, achar(27)//'[33m'//' > write derived type t_dynamics'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! ice derived type +! ! if (use_ice) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = bin_restart_dirpath//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'replace', & +! ! form = 'unformatted') +! ! write(fileunit) ice +! ! close(fileunit) +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) then +! ! write(fileunit_i, '(1(g0))') "! t_ice" +! ! print *, achar(27)//'[33m'//' > write derived type t_ice'//achar(27)//'[0m' +! ! end if +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! if(partit%mype == RAW_RESTART_METADATA_RANK) close(fileunit_i) +! ! +! ! end subroutine +! ! ! +! ! ! +! ! !_______________________________________________________________________________ +! ! subroutine read_all_bin_restarts(path_in, ice, dynamics, tracers, partit, mesh) +! ! implicit none +! ! +! ! ! do optional here for the usage with dwarfs, since there only specific derived +! ! ! types will be needed +! ! character(len=*), intent(in) :: path_in +! ! type(t_ice) , intent(inout), target, optional :: ice +! ! type(t_dyn) , intent(inout), target, optional :: dynamics +! ! type(t_tracer), intent(inout), target, optional :: tracers +! ! type(t_partit), intent(inout), target, optional :: partit +! ! type(t_mesh) , intent(inout), target, optional :: mesh +! ! integer fileunit +! ! +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[1;33m'//' --> read restarts from derived type binary'//achar(27)//'[0m' +! ! +! ! !___________________________________________________________________________ +! ! ! mesh derived type +! ! if (present(mesh)) then +! ! fileunit = partit%mype+300 +! ! 
open(newunit = fileunit, & +! ! file = trim(path_in)//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'old', & +! ! form = 'unformatted') +! ! read(fileunit) mesh +! ! close(fileunit) +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_mesh'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! partit derived type +! ! if (present(partit)) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = trim(path_in)//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'old', & +! ! form = 'unformatted') +! ! read(fileunit) partit +! ! close(fileunit) +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_partit'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! tracer derived type +! ! if (present(tracers)) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = trim(path_in)//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'old', & +! ! form = 'unformatted') +! ! read(fileunit) tracers +! ! close(fileunit) +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_tracer'//achar(27)//'[0m' +! ! end if +! ! +! ! !___________________________________________________________________________ +! ! ! dynamics derived type +! ! if (present(dynamics)) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = trim(path_in)//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'old', & +! ! form = 'unformatted') +! ! read(fileunit) dynamics +! ! close(fileunit) +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_dynamics'//achar(27)//'[0m' +! ! end if +! ! +! ! 
!___________________________________________________________________________ +! ! ! ice derived type +! ! if (present(ice)) then +! ! fileunit = partit%mype+300 +! ! open(newunit = fileunit, & +! ! file = trim(path_in)//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & +! ! status = 'old', & +! ! form = 'unformatted') +! ! read(fileunit) ice +! ! close(fileunit) +! ! if (partit%mype==RAW_RESTART_METADATA_RANK) print *, achar(27)//'[33m'//' > read derived type t_ice'//achar(27)//'[0m' +! ! end if +! ! end subroutine ! -!-------------------------------------------------------------------------------------------- ! -subroutine def_variable_2d(id, name, dims, longname, units, data) - implicit none - type(nc_file), intent(inout) :: id - character(len=*), intent(in) :: name - integer, intent(in) :: dims(2) - character(len=*), intent(in), optional :: units, longname - real(kind=WP),target, intent(inout) :: data(:,:) - integer :: c - type(nc_vars), allocatable, dimension(:) :: temp - - if (id%nvar > 0) then - ! create temporal dimension - allocate(temp(id%nvar)); temp=id%var - ! deallocate the input data array - deallocate(id%var) - ! then reallocate - id%nvar=id%nvar+1 - allocate(id%var(id%nvar)) - ! restore the original data - id%var(1:id%nvar-1)=temp - deallocate(temp) - else - ! first dimension in a file - id%nvar=1 - allocate(id%var(id%nvar)) - end if - id%var(id%nvar)%name=trim(name) - id%var(id%nvar)%longname=trim(longname) - id%var(id%nvar)%units=trim(units) - id%var(id%nvar)%ndim=2 - id%var(id%nvar)%dims(1:2)=dims - id%var(id%nvar)%pt2=>data -end subroutine def_variable_2d +!_______________________________________________________________________________ +subroutine read_all_raw_restarts(mpicomm, mype) + integer, intent(in) :: mpicomm + integer, intent(in) :: mype + ! EO parameters + integer rstep + real(kind=WP) rtime + integer fileunit + integer status + integer mpierr + include 'mpif.h' + + if(mype == RAW_RESTART_METADATA_RANK) then + ! 
read metadata info for the raw restart + open(newunit = fileunit, status = 'old', iostat = status, file = raw_restart_infopath) + if(status == 0) then + read(fileunit,*) rstep + read(fileunit,*) rtime + close(fileunit) + else + print *,"can not open ",raw_restart_infopath + stop 1 + end if + + ! compare the restart time with our actual time + if(int(ctime) /= int(rtime)) then + print *, "raw restart time ",rtime,"does not match current clock time",ctime + stop 1 + end if + globalstep = rstep + print *,"reading raw restart from "//raw_restart_dirpath + end if + ! sync globalstep with the other processes to let all processes writing portable restart files know the globalstep + call MPI_Bcast(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, mpicomm, mpierr) + + open(newunit = fileunit, status = 'old', iostat = status, file = raw_restart_dirpath//'/'//mpirank_to_txt(mpicomm)//'.dump', form = 'unformatted') + if(status == 0) then + call read_raw_restart_group(oce_files, fileunit) + if(use_ice) call read_raw_restart_group(ice_files, fileunit) + close(fileunit) + else + print *,"can not open ",raw_restart_dirpath//'/'//mpirank_to_txt(mpicomm)//'.dump' + stop 1 + end if +end subroutine ! -!-------------------------------------------------------------------------------------------- ! -subroutine write_restart(id, istep, mesh) - implicit none - type(nc_file), intent(inout) :: id - integer, intent(in) :: istep - type(t_mesh), intent(in) , target :: mesh - real(kind=WP), allocatable :: aux(:), laux(:) - real(kind=WP) :: t0, t1, t2, t3 - integer :: i, lev, size1, size2, size_gen, size_lev, shape - integer :: c, order - -#include "associate_mesh.h" - - ! 
Serial output implemented so far - if (mype==0) then - c=1 - !id%rec_count=id%rec_count+1 - write(*,*) 'writing restart record ', id%rec_count - id%error_status(c)=nf_open(id%filename, nf_write, id%ncid); c=c+1 - id%error_status(c)=nf_put_vara_double(id%ncid, id%tID, id%rec_count, 1, ctime, 1); c=c+1 - id%error_status(c)=nf_put_vara_int(id%ncid, id%iID, id%rec_count, 1, globalstep+istep, 1); c=c+1 - end if +!_______________________________________________________________________________ +subroutine read_raw_restart_group(filegroup, fileunit) + type(restart_file_group), intent(inout) :: filegroup + integer, intent(in) :: fileunit + ! EO parameters + integer i + + do i=1, filegroup%nfiles + call filegroup%files(i)%read_variables_raw(fileunit) + end do +end subroutine +! +! +!_______________________________________________________________________________ +! join remaining threads and close all open files +subroutine finalize_restart() + integer i + + ! join all previous writes + ! close all restart files + + do i=1, oce_files%nfiles + call oce_files%files(i)%join() + if(oce_files%files(i)%is_iorank()) then + if(oce_files%files(i)%is_attached()) call oce_files%files(i)%close_file() + end if + end do - call was_error(id); c=1 - - do i=1, id%nvar - shape=id%var(i)%ndim -!_______writing 2D fields________________________________________________ - if (shape==1) then - size1=id%var(i)%dims(1) - if (mype==0) allocate(aux(size1)) - t0=MPI_Wtime() - if (size1==nod2D) call gather_nod (id%var(i)%pt1, aux) - if (size1==elem2D) call gather_elem(id%var(i)%pt1, aux) - t1=MPI_Wtime() - if (mype==0) then - id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/1, id%rec_count/), (/size1, 1/), aux, 1); c=c+1 + if(use_ice) then + do i=1, ice_files%nfiles + call ice_files%files(i)%join() + if(ice_files%files(i)%is_iorank()) then + if(ice_files%files(i)%is_attached()) call ice_files%files(i)%close_file() + end if + end do + end if +end subroutine +! +! 
+!_______________________________________________________________________________ +subroutine read_restart(path, filegroup, mpicomm, mype) + character(len=*), intent(in) :: path + type(restart_file_group), intent(inout) :: filegroup + integer, intent(in) :: mpicomm + integer, intent(in) :: mype + ! EO parameters + real(kind=WP) rtime + integer i + character(:), allocatable :: dirpath + integer mpistatus(MPI_STATUS_SIZE) + logical file_exists + logical, allocatable :: skip_file(:) + integer current_iorank_snd, current_iorank_rcv + integer max_globalstep + integer mpierr + include 'mpif.h' + + allocate(skip_file(filegroup%nfiles)) + skip_file = .false. + + do i=1, filegroup%nfiles + current_iorank_snd = 0 + current_iorank_rcv = 0 + if( filegroup%files(i)%is_iorank() ) then + dirpath = path(1:len(path)-3) ! chop of the ".nc" suffix + if(filegroup%files(i)%path .ne. dirpath//"/"//filegroup%files(i)%varname//".nc") then + filegroup%files(i)%path = dirpath//"/"//filegroup%files(i)%varname//".nc" + + ! determine if the file should be skipped + if(.not. filegroup%files(i)%must_exist_on_read) then + current_iorank_snd = mype + inquire(file=filegroup%files(i)%path, exist=file_exists) + if(.not. file_exists) skip_file(i) = .true. end if - t2=MPI_Wtime() -#ifdef DEBUG - ! Timeing information for collecting and writing restart file - if (mype==0) write(*,*) 'nvar: ', i, 'size: ', size1, 'gather_nod: ', t1-t0 - if (mype==0) write(*,*) 'nvar: ', i, 'size: ', size1, 'nf_put_var: ', t2-t1 + + if(.not. skip_file(i)) then +#ifndef DISABLE_PARALLEL_RESTART_READ + write(*,*) 'reading restart PARALLEL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path +#else + write(*,*) 'reading restart SEQUENTIAL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path #endif - if (mype==0) deallocate(aux) -!_______writing 3D fields________________________________________________ - elseif (shape==2) then - size1=id%var(i)%dims(1) - size2=id%var(i)%dims(2) - ! 
I assume that the model has always more surface nodes or elements than - ! vertical layers => more flexibility in terms of array dimensions - if (size1==nod2D .or. size1==elem2D) then - size_gen=size1 - size_lev=size2 - order=1 - else if (size2==nod2D .or. size2==elem2D) then - size_gen=size2 - size_lev=size1 - order=2 else - if (mype==0) write(*,*) 'the shape of the array in the restart file and the grid size are different' - call par_ex - stop - end if - if (mype==0) allocate(aux (size_gen)) - if (size_gen==nod2D) allocate(laux(myDim_nod2D +eDim_nod2D )) - if (size_gen==elem2D) allocate(laux(myDim_elem2D+eDim_elem2D)) - do lev=1, size_lev - if (order==1) laux=id%var(i)%pt2(:,lev) - if (order==2) laux=id%var(i)%pt2(lev,:) - if (size_gen==nod2D) call gather_nod (laux, aux) - if (size_gen==elem2D) call gather_elem(laux, aux) - if (mype==0) then - if (order==1) id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/1, lev, id%rec_count/), (/size_gen, 1, 1/), aux, 1); c=c+1 - if (order==2) id%error_status(c)=nf_put_vara_double(id%ncid, id%var(i)%code, (/lev, 1, id%rec_count/), (/1, size_gen, 1/), aux, 1); c=c+1 - end if - t2=MPI_Wtime() -#ifdef DEBUG - ! Timeing information for collecting and writing output file - if (mype==0) write(*,*) 'nvar: ', i, 'size: ', size2, 'lev: ', lev, 'gather_nod: ', t1-t0 - if (mype==0) write(*,*) 'nvar: ', i, 'size: ', size2, 'lev: ', lev, 'nf_put_var: ', t2-t1 +#ifndef DISABLE_PARALLEL_RESTART_READ + write(*,*) 'skipping reading restart PARALLEL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path +#else + write(*,*) 'skipping reading restart SEQUENTIAL for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path #endif - end do - deallocate(laux) - if (mype==0) deallocate(aux) - else - if (mype==0) write(*,*) 'not supported shape of array in restart file' - call par_ex - stop - end if - call was_error(id); c=1 + end if + + if(.not. skip_file(i)) call filegroup%files(i)%open_read(filegroup%files(i)%path) ! 
do we need to bother with read-only access? + ! todo: print a reasonable error message if the file does not exist + end if + end if + + ! iorank already knows if we skip the file, tell the others + if(.not. filegroup%files(i)%must_exist_on_read) then + call MPI_Allreduce(current_iorank_snd, current_iorank_rcv, 1, MPI_INTEGER, MPI_SUM, mpicomm, mpierr) + call MPI_Bcast(skip_file(i), 1, MPI_LOGICAL, current_iorank_rcv, mpicomm, mpierr) + end if + + if(.not. skip_file(i)) call filegroup%files(i)%async_read_and_scatter_variables() +#ifndef DISABLE_PARALLEL_RESTART_READ end do + + do i=1, filegroup%nfiles +#endif + if(skip_file(i)) cycle + call filegroup%files(i)%join() - if (mype==0) id%error_count=c-1 - call was_error(id) - if (mype==0) id%error_status(1)=nf_close(id%ncid); - id%error_count=1 - call was_error(id) -end subroutine write_restart -! -!-------------------------------------------------------------------------------------------- -! -subroutine read_restart(id, mesh, arg) - implicit none - type(nc_file), intent(inout) :: id - integer, optional, intent(in) :: arg - real(kind=WP), allocatable :: aux(:), laux(:) - integer :: i, lev, size1, size2, size_gen, size_lev, shape - integer :: rec2read, c, order - real(kind=WP) :: rtime !timestamp of the record - logical :: file_exist=.False. - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! laux=0. - ! Serial output implemented so far - c=1 - if (mype==0) then - file_exist=.False. 
- inquire(file=id%filename,exist=file_exist) - if (file_exist) then - write(*,*) ' reading restart file: ', trim(id%filename) - id%error_status(c)=nf_open(id%filename, nf_nowrite, id%ncid); c=c+1 - id%error_status(c)=nf_get_vara_int(id%ncid, id%iID, id%rec_count, 1, globalstep, 1); c=c+1 - id%error_status(c)=nf_get_vara_double(id%ncid, id%tID, id%rec_count, 1, rtime, 1); c=c+1 - else - write(*,*) - print *, achar(27)//'[33m' - write(*,*) '____________________________________________________________________' - write(*,*) ' ERROR: could not find restart_file:',trim(id%filename),'!' - write(*,*) '____________________________________________________________________' - print *, achar(27)//'[0m' - write(*,*) - call par_ex - end if - - if (.not. present(arg)) then - rec2read=id%rec_count - else - rec2read=arg - end if - write(*,*) 'restart from record ', rec2read, ' of ', id%rec_count + if(filegroup%files(i)%is_iorank()) then + write(*,*) 'restart from record ', filegroup%files(i)%rec_count(), ' of ', filegroup%files(i)%rec_count(), filegroup%files(i)%path + + ! read the last entry from the iter variable + call filegroup%files(i)%read_var1(filegroup%files(i)%iter_varindex, [filegroup%files(i)%rec_count()], globalstep) + + ! read the last entry from the time variable + call filegroup%files(i)%read_var1(filegroup%files(i)%time_varindex(), [filegroup%files(i)%rec_count()], rtime) + call filegroup%files(i)%close_file() if (int(ctime)/=int(rtime)) then - write(*,*) 'Reading restart: timestamps in restart and in clock files do not match' + write(*,*) 'Reading restart: timestamps in restart and in clock files do not match for ', filegroup%files(i)%varname, ' at ', filegroup%files(i)%path write(*,*) 'restart/ times are:', ctime, rtime write(*,*) 'the model will stop!' 
- id%error_status(c)=-310; c=c+1 - end if - end if - - call was_error(id); c=1 - - do i=1, id%nvar - shape=id%var(i)%ndim - if (mype==0) write(*,*) 'reading restart for ', trim(id%var(i)%name) -!_______writing 2D fields________________________________________________ - if (shape==1) then - size1=id%var(i)%dims(1) - if (mype==0) then - allocate(aux(size1)) - id%error_status(c)=nf_get_vara_double(id%ncid, id%var(i)%code, (/1, id%rec_count/), (/size1, 1/), aux, 1); c=c+1 -! write(*,*) 'min/max 2D =', minval(aux), maxval(aux) - end if - if (size1==nod2D) call broadcast_nod (id%var(i)%pt1, aux) - if (size1==elem2D) call broadcast_elem(id%var(i)%pt1, aux) - if (mype==0) deallocate(aux) -!_______writing 3D fields________________________________________________ - elseif (shape==2) then - size1=id%var(i)%dims(1) - size2=id%var(i)%dims(2) - ! I assume that the model has always more surface nodes or elements than - ! vertical layers => more flexibility in terms of array dimensions - if (size1==nod2D .or. size1==elem2D) then - size_gen=size1 - size_lev=size2 - order=1 - else if (size2==nod2D .or. size2==elem2D) then - size_gen=size2 - size_lev=size1 - order=2 - else - if (mype==0) write(*,*) 'the shape of the array in the restart file and the grid size are different' - call par_ex - stop - end if - if (mype==0) allocate(aux (size_gen)) - if (size_gen==nod2D) allocate(laux(myDim_nod2D +eDim_nod2D )) - if (size_gen==elem2D) allocate(laux(myDim_elem2D+eDim_elem2D)) - do lev=1, size_lev - if (mype==0) then - if (order==1) id%error_status(c)=nf_get_vara_double(id%ncid, id%var(i)%code, (/1, lev, id%rec_count/), (/size_gen, 1, 1/), aux, 1); c=c+1 - if (order==2) id%error_status(c)=nf_get_vara_double(id%ncid, id%var(i)%code, (/lev, 1, id%rec_count/), (/1, size_gen, 1/), aux, 1); c=c+1 - end if - id%var(i)%pt2(lev,:)=0. 
- if (size_gen==nod2D) then - call broadcast_nod (laux, aux) - if (order==1) id%var(i)%pt2(:,lev)=laux(1:myDim_nod2D+eDim_nod2D) - if (order==2) id%var(i)%pt2(lev,:)=laux(1:myDim_nod2D+eDim_nod2D) - end if - if (size_gen==elem2D) then - call broadcast_elem(laux, aux) - if (order==1) id%var(i)%pt2(:,lev)=laux(1:myDim_elem2D+eDim_elem2D) - if (order==2) id%var(i)%pt2(lev,:)=laux(1:myDim_elem2D+eDim_elem2D) - end if - end do - deallocate(laux) - if (mype==0) deallocate(aux) - else - if (mype==0) write(*,*) 'not supported shape of array in restart file when reading restart' - call par_ex - stop - end if - call was_error(id); c=1 + stop 1 + end if + end if end do - if (mype==0) id%error_status(1)=nf_close(id%ncid); - id%error_count=1 - call was_error(id) -end subroutine read_restart -! -!-------------------------------------------------------------------------------------------- -! -subroutine assoc_ids(id) - implicit none - - type(nc_file), intent(inout) :: id - character(500) :: longname - integer :: c, j, k - real(kind=WP) :: rtime !timestamp of the record - ! Serial output implemented so far - if (mype/=0) return - c=1 - id%error_status=0 - ! open existing netcdf file - write(*,*) 'associating restart file ', trim(id%filename) - - id%error_status(c) = nf_open(id%filename, nf_nowrite, id%ncid) - !if the file does not exist it will be created! - if (id%error_status(c) .ne. nf_noerr) then - call create_new_file(id) ! error status counter will be reset - c=id%error_count+1 - id%error_status(c) = nf_open(id%filename, nf_nowrite, id%ncid); c=c+1 + ! sync globalstep with processes which may have skipped a restart upon reading and thus need to know the globalstep when writing their restart + if( any(skip_file .eqv. .true.) 
) then + call MPI_Allreduce(globalstep, max_globalstep, 1, MPI_INTEGER, MPI_MAX, mpicomm, mpierr) + globalstep = max_globalstep end if - do j=1, id%ndim -!___Associate mesh related dimentions_______________________________________ - id%error_status(c) = nf_inq_dimid(id%ncid, id%dim(j)%name, id%dim(j)%code); c=c+1 - end do -!___Associate time related dimentions_______________________________________ - id%error_status(c) = nf_inq_dimid (id%ncid, 'time', id%rec); c=c+1 - id%error_status(c) = nf_inq_dimlen(id%ncid, id%rec, id%rec_count); c=c+1 -!___Associate the time and iteration variables______________________________ - id%error_status(c) = nf_inq_varid(id%ncid, 'time', id%tID); c=c+1 - id%error_status(c) = nf_inq_varid(id%ncid, 'iter', id%iID); c=c+1 -!___if the time rtime at the rec_count does not equal ctime we look for the closest record with the -! timestamp less than ctime - do k=id%rec_count, 1, -1 - id%error_status(c)=nf_get_vara_double(id%ncid, id%tID, k, 1, rtime, 1); - if (ctime > rtime) then - id%rec_count=k+1 - exit ! a proper rec_count detected, ready for writing restart, exit the loop - elseif (ctime == rtime) then - id%rec_count=k - exit ! a proper rec_count detected, ready for reading restart, exit the loop - end if - if (k==1) then - if (mype==0) write(*,*) 'WARNING: all dates in restart file are after the current date' - if (mype==0) write(*,*) 'reading restart will not be possible !' - if (mype==0) write(*,*) 'the model attempted to start with the time stamp = ', int(ctime) - id%error_status(c)=-310; - end if - end do - c=c+1 ! 
check will be made only for the last nf_get_vara_double - id%rec_count=max(id%rec_count, 1) -!___Associate physical variables____________________________________________ - do j=1, id%nvar - id%error_status(c) = nf_inq_varid(id%ncid, id%var(j)%name, id%var(j)%code); c=c+1 - end do - id%error_status(c)=nf_close(id%ncid); c=c+1 - id%error_count=c-1 - write(*,*) 'current restart counter = ', id%rec_count -end subroutine assoc_ids + ! sync globalstep with the process responsible for raw restart metadata + if(filegroup%nfiles >= 1) then + ! use the first restart I/O process to send the globalstep + if( filegroup%files(1)%is_iorank() .and. (mype .ne. RAW_RESTART_METADATA_RANK)) then + call MPI_Send(globalstep, 1, MPI_INTEGER, RAW_RESTART_METADATA_RANK, 42, mpicomm, mpierr) + else if((mype == RAW_RESTART_METADATA_RANK) .and. (.not. filegroup%files(1)%is_iorank())) then + call MPI_Recv(globalstep, 1, MPI_INTEGER, MPI_ANY_SOURCE, 42, mpicomm, mpistatus, mpierr) + end if + end if +end subroutine ! -!-------------------------------------------------------------------------------------------- ! -subroutine was_error(id) - implicit none - type(nc_file), intent(inout) :: id - integer :: k, status, ierror - - call MPI_BCast(id%error_count, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - call MPI_BCast(id%error_status(1), id%error_count, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - - do k=1, id%error_count - status=id%error_status(k) - if (status .ne. nf_noerr) then - if (mype==0) write(*,*) 'error counter=', k - if (mype==0) call handle_err(status) - call par_ex - stop - end if - end do -end subroutine was_error -END MODULE io_RESTART +!_______________________________________________________________________________ + function is_due(unit, frequency, istep) result(d) + character(len=*), intent(in) :: unit + integer, intent(in) :: frequency + integer, intent(in) :: istep + logical d + ! EO parameters + d = .false. 
+ + if(unit.eq.'y') then + call annual_event(d) + else if(unit.eq.'m') then + call monthly_event(d) + else if(unit.eq.'d') then + call daily_event(d, frequency) + else if(unit.eq.'h') then + call hourly_event(d, frequency) + else if(unit.eq.'s') then + call step_event(d, istep, frequency) + else if(unit.eq.'off') then + d = .false. + else + write(*,*) 'You did not specify a supported outputflag.' + write(*,*) 'The program will stop to give you opportunity to do it.' + stop 1 + stop + end if + end function +! ! +! ! +! !_______________________________________________________________________________ +! function mpirank_to_txt(mpicomm) result(txt) +! use fortran_utils +! integer, intent(in) :: mpicomm +! character(:), allocatable :: txt +! ! EO parameters +! integer mype +! integer npes +! integer mpierr +! include 'mpif.h' +! +! call MPI_Comm_Rank(mpicomm, mype, mpierr) +! call MPI_Comm_Size(mpicomm, npes, mpierr) +! txt = int_to_txt_pad(mype,int(log10(real(npes)))+1) ! pad to the width of the number of processes +! 
end function +!!PS --> move this function also to fortran_utils.F90 + +end module diff --git a/src/io_restart_derivedtype.F90 b/src/io_restart_derivedtype.F90 new file mode 100644 index 000000000..c6f4d9049 --- /dev/null +++ b/src/io_restart_derivedtype.F90 @@ -0,0 +1,252 @@ +module restart_derivedtype_module + interface + subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, partit, mesh, ice, dynamics, tracers) + use MOD_ICE + use MOD_DYN + use MOD_TRACER + use MOD_PARTIT + use MOD_MESH + integer, dimension(3) , intent(in) :: ctarr + character(len=*), intent(in) :: path_in + character(len=*), intent(in) :: pathi_in + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + type(t_ice) , intent(inout), target, optional :: ice + type(t_dyn) , intent(inout), target, optional :: dynamics + type(t_tracer), intent(inout), target, optional :: tracers + end subroutine + + subroutine read_all_bin_restarts(path_in, partit, mesh, ice, dynamics, tracers) + use MOD_ICE + use MOD_DYN + use MOD_TRACER + use MOD_PARTIT + use MOD_MESH + character(len=*), intent(in) :: path_in + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + type(t_ice) , intent(inout), target, optional :: ice + type(t_dyn) , intent(inout), target, optional :: dynamics + type(t_tracer), intent(inout), target, optional :: tracers + end subroutine + end interface +end module +! +! +!_______________________________________________________________________________ +subroutine write_all_bin_restarts(ctarr, path_in, pathi_in, partit, mesh, ice, dynamics, tracers) + use MOD_ICE + use MOD_DYN + use MOD_TRACER + use MOD_PARTIT + use MOD_MESH + use fortran_utils + implicit none + + integer, dimension(3) , intent(in) :: ctarr ! 
//cstep,ctime,cyear// + character(len=*) , intent(in) :: path_in + character(len=*) , intent(in) :: pathi_in + type(t_partit), target, intent(in) :: partit + type(t_mesh) , target, intent(in) :: mesh + type(t_ice) , target, intent(in), optional :: ice + type(t_dyn) , target, intent(in), optional :: dynamics + type(t_tracer), target, intent(in), optional :: tracers + + ! EO parameters + integer fileunit, fileunit_i + +#if defined(__PGI) + if (partit%mype == 0) then + write(*,*) 'write_all_bin_restarts is deactivated for PGI compiler because of T_TRACER%DATA & T_ICE%DATA cause write call to crash' + write(*,*) '*** checked for NVHPC/22.1 ***' + end if +#else + + !___________________________________________________________________________ + ! write info file + if(partit%mype == 0) then + print *, achar(27)//'[1;33m'//' --> writing derived type binary restarts to '//trim(path_in)//achar(27)//'[0m' + ! store metadata about the raw restart + fileunit_i = 299 + open(newunit = fileunit_i, file = trim(pathi_in)) + write(fileunit_i, '(g0)') ctarr(1) + write(fileunit_i, '(g0)') ctarr(2) + write(fileunit_i, '(2(g0))') "! year: ",ctarr(3) + end if + + !___________________________________________________________________________ + ! mesh derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) mesh + close(fileunit) + if(partit%mype == 0) then + write(fileunit_i, '(1(g0))') "! t_mesh" + print *, achar(27)//'[33m'//' > write derived type t_mesh'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! 
partit derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) partit + close(fileunit) + if(partit%mype == 0) then + write(fileunit_i, '(1(g0))') "! t_partit" + print *, achar(27)//'[33m'//' > write derived type t_partit'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! tracer derived type + if (present(tracers)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) tracers + close(fileunit) + if(partit%mype == 0) then + write(fileunit_i, '(1(g0))') "! t_tracer" + print *, achar(27)//'[33m'//' > write derived type t_tracer'//achar(27)//'[0m' + end if + end if + + !___________________________________________________________________________ + ! dynamics derived type + if (present(dynamics)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) dynamics + close(fileunit) + if(partit%mype == 0) then + write(fileunit_i, '(1(g0))') "! t_dynamics" + print *, achar(27)//'[33m'//' > write derived type t_dynamics'//achar(27)//'[0m' + end if + end if + + !___________________________________________________________________________ + ! ice derived type + if (present(ice)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'replace', & + form = 'unformatted') + write(fileunit) ice + close(fileunit) + if(partit%mype == 0) then + write(fileunit_i, '(1(g0))') "! 
t_ice" + print *, achar(27)//'[33m'//' > write derived type t_ice'//achar(27)//'[0m' + end if + end if + !___________________________________________________________________________ + if(partit%mype == 0) close(fileunit_i) +#endif !defined(__PGI) +end subroutine +! +! +!_______________________________________________________________________________ +! read derived type binary restart files, depending on input (see optional) not +! all derived type binaries are read --> functionalitiy for dwarfs ! +subroutine read_all_bin_restarts(path_in, ice, dynamics, tracers, partit, mesh) + use MOD_ICE + use MOD_DYN + use MOD_TRACER + use MOD_PARTIT + use MOD_MESH + use fortran_utils + implicit none + + ! do optional here for the usage with dwarfs, since there only specific derived + ! types will be needed + character(len=*), intent(in) :: path_in + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + type(t_ice) , intent(inout), target, optional :: ice + type(t_dyn) , intent(inout), target, optional :: dynamics + type(t_tracer), intent(inout), target, optional :: tracers + integer fileunit + +#if defined(__PGI) + if (partit%mype == 0) then + write(*,*) 'read_all_bin_restarts is deactivated for PGI compiler because of T_TRACER%DATA & T_ICE%DATA cause write call to crash' + write(*,*) '*** checked for NVHPC/22.1 ***' + end if +#else + + !___________________________________________________________________________ + if (partit%mype==0) print *, achar(27)//'[1;33m'//' --> read restarts from derived type binary'//achar(27)//'[0m' + + !___________________________________________________________________________ + ! 
mesh derived type + fileunit = partit%mype+300 + open( newunit = fileunit, & + file = trim(path_in)//'/'//'t_mesh.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) mesh + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_mesh'//achar(27)//'[0m' + + !___________________________________________________________________________ + ! partit derived type + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_partit.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) partit + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_partit'//achar(27)//'[0m' + + !___________________________________________________________________________ + ! tracer derived type + if (present(tracers)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_tracer.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) tracers + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_tracer'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! dynamics derived type + if (present(dynamics)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_dynamics.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) dynamics + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_dynamics'//achar(27)//'[0m' + end if + + !___________________________________________________________________________ + ! 
ice derived type + if (present(ice)) then + fileunit = partit%mype+300 + open(newunit = fileunit, & + file = trim(path_in)//'/'//'t_ice.'//mpirank_to_txt(partit%MPI_COMM_FESOM), & + status = 'old', & + form = 'unformatted') + read(fileunit) ice + close(fileunit) + if (partit%mype==0) print *, achar(27)//'[33m'//' > read derived type t_ice'//achar(27)//'[0m' + end if +#endif !defined(__PGI) +end subroutine + diff --git a/src/io_restart_file_group.F90 b/src/io_restart_file_group.F90 new file mode 100644 index 000000000..772c8e1d0 --- /dev/null +++ b/src/io_restart_file_group.F90 @@ -0,0 +1,189 @@ +! helper module to treat split restart files similar as the previous single-file ones +module restart_file_group_module + use io_fesom_file_module + use MOD_PARTIT + implicit none + public restart_file_group + private + + + type, extends(fesom_file_type) :: restart_file_type + integer iter_varindex + character(:), allocatable :: varname + character(:), allocatable :: path + logical must_exist_on_read + end type + + + type restart_file_group + private + type(restart_file_type), public :: files(20) + integer, public :: nfiles = 0 ! todo: allow dynamically allocated size without messing with shallow copied pointers + contains + generic, public :: def_node_var => def_node_var_2d, def_node_var_3d + generic, public :: def_elem_var => def_elem_var_2d, def_elem_var_3d + procedure, private :: def_node_var_2d, def_node_var_3d + procedure, private :: def_elem_var_2d, def_elem_var_3d + ! 
def_*_optional procedures create a restart variable which does not have to exist when reading the restart file + generic, public :: def_node_var_optional => def_node_var_2d_optional, def_node_var_3d_optional + generic, public :: def_elem_var_optional => def_elem_var_2d_optional, def_elem_var_3d_optional + procedure, private :: def_node_var_2d_optional, def_node_var_3d_optional + procedure, private :: def_elem_var_2d_optional, def_elem_var_3d_optional + end type + +contains + + + subroutine def_node_var_2d(this, name, longname, units, local_data, mesh, partit) + use mod_mesh + class(restart_file_group), target, intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit + ! EO parameters + + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl, partit) + call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) + end subroutine + + + subroutine def_node_var_3d(this, name, longname, units, local_data, mesh, partit) + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit + ! EO parameters + + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl, partit) + call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) + end subroutine + + + subroutine def_elem_var_2d(this, name, longname, units, local_data, mesh, partit) + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:) ! 
todo: be able to set precision + type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit + ! EO parameters + + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl, partit) + call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) + end subroutine + + + subroutine def_elem_var_3d(this, name, longname, units, local_data, mesh, partit) + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit + ! EO parameters + + call add_file(this, name, .true., mesh%nod2d, mesh%elem2d, mesh%nl, partit) + call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) + end subroutine + + + subroutine add_file(g, name, must_exist_on_read, mesh_nod2d, mesh_elem2d, mesh_nl, partit) + class(restart_file_group), target, intent(inout) :: g + character(len=*), intent(in) :: name + logical must_exist_on_read + integer mesh_nod2d, mesh_elem2d, mesh_nl + type(t_partit), intent(in) :: partit + ! EO parameters + type(restart_file_type), pointer :: f + + call assert(g%nfiles < size(g%files), __LINE__) + g%nfiles = g%nfiles+1 + f => g%files(g%nfiles) + + f%path = "" + allocate(f%varname,source=name) + f%must_exist_on_read = must_exist_on_read + call f%fesom_file_type%init(mesh_nod2d, mesh_elem2d, mesh_nl, partit) + ! this is specific for a restart file + f%iter_varindex = f%add_var_int('iter', [f%time_dimindex()]) + end subroutine + + + subroutine def_node_var_2d_optional(this, name, longname, units, local_data, mesh, partit) + use mod_mesh + class(restart_file_group), target, intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:) ! 
todo: be able to set precision + type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit + ! EO parameters + + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl, partit) + call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) + end subroutine + + + subroutine def_node_var_3d_optional(this, name, longname, units, local_data, mesh, partit) + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit + ! EO parameters + + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl, partit) + call this%files(this%nfiles)%specify_node_var(name, longname, units, local_data) + end subroutine + + + subroutine def_elem_var_2d_optional(this, name, longname, units, local_data, mesh, partit) + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit + ! EO parameters + + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl, partit) + call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) + end subroutine + + + subroutine def_elem_var_3d_optional(this, name, longname, units, local_data, mesh, partit) + use mod_mesh + class(restart_file_group), intent(inout) :: this + character(len=*), intent(in) :: name + character(len=*), intent(in) :: units, longname + real(kind=8), target, intent(inout) :: local_data(:,:) ! todo: be able to set precision + type(t_mesh), intent(in) :: mesh + type(t_partit), intent(in) :: partit + ! 
EO parameters + + call add_file(this, name, .false., mesh%nod2d, mesh%elem2d, mesh%nl, partit) + call this%files(this%nfiles)%specify_elem_var(name, longname, units, local_data) + end subroutine + + + subroutine assert(val, line) + logical, intent(in) :: val + integer, intent(in) :: line + ! EO parameters + if(.not. val) then + print *, "error in line ",line, __FILE__ + stop 1 + end if + end subroutine + +end module diff --git a/src/io_scatter.F90 b/src/io_scatter.F90 new file mode 100644 index 000000000..27e8714ce --- /dev/null +++ b/src/io_scatter.F90 @@ -0,0 +1,130 @@ +module io_scatter_module + implicit none + public scatter_nod2D, scatter_elem2D + private + +contains + + + ! thread-safe procedure + subroutine scatter_nod2D(arr2D_global, arr2D_local, root_rank, comm, partit) + use MOD_PARTIT + use, intrinsic :: iso_fortran_env, only: real64 + real(real64), intent(in) :: arr2D_global(:) + real(real64), intent(out) :: arr2D_local(:) + integer, intent(in) :: root_rank ! rank of sending process + integer, intent(in) :: comm + type(t_partit), intent(in) :: partit + ! EO args + integer :: tag = 0 + integer :: mpi_precision = MPI_DOUBLE_PRECISION + integer status(MPI_STATUS_SIZE) + integer :: remote_rank + integer, allocatable :: remote_list_nod2d(:) + real(real64), allocatable :: sendbuf(:) + integer node_size + integer mpierr + + call assert(size(arr2D_local) == size(partit%mylist_nod2d), __LINE__) ! == mydim_nod2d+edim_nod2d, i.e. partition nodes + halo nodes + + if(partit%mype == root_rank) then + arr2D_local = arr2D_global(partit%mylist_nod2d) + do remote_rank = 0, partit%npes-1 + if(remote_rank == root_rank) cycle + + ! receive remote partition 2D size + call mpi_recv(node_size, 1, mpi_integer, remote_rank, tag+0, comm, status, mpierr) + + ! 
receive remote mylist_nod2d + allocate(remote_list_nod2d(node_size)) + call mpi_recv(remote_list_nod2d(1), node_size, mpi_integer, remote_rank, tag+1, comm, status, mpierr) + + allocate(sendbuf(node_size)) + sendbuf = arr2D_global(remote_list_nod2d) + deallocate(remote_list_nod2d) + + call mpi_send(sendbuf(1), node_size, mpi_precision, remote_rank, tag+2, comm, mpierr) + deallocate(sendbuf) + end do + + else + node_size = size(partit%mylist_nod2d) + call mpi_send(node_size, 1, mpi_integer, root_rank, tag+0, comm, mpierr) + call mpi_send(partit%mylist_nod2d(1), node_size, mpi_integer, root_rank, tag+1, comm, mpierr) + + call mpi_recv(arr2D_local(1), node_size, mpi_precision, root_rank, tag+2, comm, status, mpierr) ! aleph blocks here + end if + + ! without a barrier, we get wrong results in arr2D_local + ! todo: not sure why this happens (probably because the 3D levels have the same send/recv signature), get rid of the barrier if possible + call mpi_barrier(comm, mpierr) + end subroutine + + + ! thread-safe procedure + subroutine scatter_elem2D(arr2D_global, arr2D_local, root_rank, comm, partit) + use MOD_PARTIT + use, intrinsic :: iso_fortran_env, only: real64 + real(real64), intent(in) :: arr2D_global(:) + real(real64), intent(out) :: arr2D_local(:) + integer, intent(in) :: root_rank ! rank of sending process + integer, intent(in) :: comm + type(t_partit), intent(in) :: partit + ! EO args + integer :: tag = 0 + integer :: mpi_precision = MPI_DOUBLE_PRECISION + integer status(MPI_STATUS_SIZE) + integer :: remote_rank + integer, allocatable :: remote_list_elem2d(:) + real(real64), allocatable :: sendbuf(:) + integer elem_size + integer mpierr + + elem_size = size(arr2D_local) + call assert(elem_size == partit%mydim_elem2d+partit%edim_elem2d, __LINE__) ! 
mylist_elem2d is larger and can not be used for comparison here + + if(partit%mype == root_rank) then + arr2D_local = arr2D_global(partit%myList_elem2D(1:elem_size)) + do remote_rank = 0, partit%npes-1 + if(remote_rank == root_rank) cycle + + ! receive remote partition 2D size + call mpi_recv(elem_size, 1, mpi_integer, remote_rank, tag+0, comm, status, mpierr) + + ! receive remote mylist_elem2d + allocate(remote_list_elem2d(elem_size)) + call mpi_recv(remote_list_elem2d(1), elem_size, mpi_integer, remote_rank, tag+1, comm, status, mpierr) + + allocate(sendbuf(elem_size)) + sendbuf = arr2D_global(remote_list_elem2d) + deallocate(remote_list_elem2d) + + call mpi_send(sendbuf(1), elem_size, mpi_precision, remote_rank, tag+2, comm, mpierr) + deallocate(sendbuf) + end do + + else + call mpi_send(elem_size, 1, mpi_integer, root_rank, tag+0, comm, mpierr) + call mpi_send(partit%mylist_elem2d(1), elem_size, mpi_integer, root_rank, tag+1, comm, mpierr) + + call mpi_recv(arr2D_local(1), elem_size, mpi_precision, root_rank, tag+2, comm, status, mpierr) + end if + + ! without a barrier, we get wrong results in arr2D_local + ! todo: not sure why this happens (probably because the 3D levels have the same send/recv signature), get rid of the barrier if possible + call mpi_barrier(comm, mpierr) + end subroutine + + + subroutine assert(val, line) + logical, intent(in) :: val + integer, intent(in) :: line + ! EO parameters + if(.not. 
val) then + print *, "error in line ",line, __FILE__ + stop 1 + end if + end subroutine + +end module + diff --git a/src/mpi_topology_module.F90 b/src/mpi_topology_module.F90 index 8bd876566..ea69ee737 100644 --- a/src/mpi_topology_module.F90 +++ b/src/mpi_topology_module.F90 @@ -25,7 +25,9 @@ module mpi_topology_module type :: mpi_topology_type contains - procedure, nopass :: next_host_head_rank, set_hostname_strategy, reset_state + procedure, nopass :: next_host_head_rank + procedure, nopass :: set_hostname_strategy + procedure, nopass :: reset_state end type type(mpi_topology_type) mpi_topology diff --git a/src/oce_adv_tra_driver.F90 b/src/oce_adv_tra_driver.F90 index 9454a007b..c51be2e17 100644 --- a/src/oce_adv_tra_driver.F90 +++ b/src/oce_adv_tra_driver.F90 @@ -1,89 +1,123 @@ module oce_adv_tra_driver_interfaces interface - subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v, opth, optv, mesh) + subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, mesh) use MOD_MESH - use g_PARSUP - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport - real(kind=WP), intent(in) :: ttf (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: ttfAB (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: opth, optv + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + real(kind=WP), intent(in), target :: dt + integer, intent(in) :: tr_num + type(t_partit), 
intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WE(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) end subroutine end interface end module module oce_tra_adv_flux2dtracer_interface interface - subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo, ttf, lo) - !update the solution for vertical and horizontal flux contributions + subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, mesh, use_lo, ttf, lo) + !update the solution for vertical and horizontal flux contributions use MOD_MESH - use g_PARSUP - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(inout) :: flux_v(mesh%nl, myDim_nod2D) + USE MOD_PARTIT + USE MOD_PARSUP + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: flux_v(mesh%nl, partit%myDim_nod2D) logical, optional :: use_lo - real(kind=WP), optional :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), optional :: lo (mesh%nl-1, myDim_nod2D+eDim_nod2D) + real(kind=WP), optional :: 
ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), optional :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) end subroutine end interface end module ! ! !=============================================================================== -subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v, opth, optv, mesh) +subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, dynamics, tracers, partit, mesh) use MOD_MESH - use O_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_CONFIG + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN use g_comm_auto use oce_adv_tra_hor_interfaces use oce_adv_tra_ver_interfaces use oce_adv_tra_fct_interfaces use oce_tra_adv_flux2dtracer_interface implicit none - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(in), target :: W(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WI(mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in), target :: WE(mesh%nl, myDim_nod2D+eDim_nod2D) - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport - real(kind=WP), intent(in) :: ttf (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: ttfAB(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: opth, optv - real(kind=WP), pointer, dimension (:,:) :: pwvel - + real(kind=WP), intent(in), target :: dt + integer, intent(in) :: tr_num + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(in), target :: W(mesh%nl, 
partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WE(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + + real(kind=WP), pointer, dimension (:,:) :: pwvel + real(kind=WP), pointer, dimension (:,:) :: ttf, ttfAB, fct_LO + real(kind=WP), pointer, dimension (:,:) :: adv_flux_hor, adv_flux_ver, dttf_h, dttf_v + real(kind=WP), pointer, dimension (:,:) :: fct_ttf_min, fct_ttf_max + real(kind=WP), pointer, dimension (:,:) :: fct_plus, fct_minus + + integer, pointer, dimension (:) :: nboundary_lay + real(kind=WP), pointer, dimension (:,:,:) :: edge_up_dn_grad + integer :: el(2), enodes(2), nz, n, e - integer :: nl12, nu12, nl1, nl2, nu1, nu2, tr_num + integer :: nl12, nu12, nl1, nl2, nu1, nu2 real(kind=WP) :: cLO, cHO, deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: qc, qu, qd real(kind=WP) :: tvert(mesh%nl), tvert_e(mesh%nl), a, b, c, d, da, db, dg, vflux, Tupw1 real(kind=WP) :: Tmean, Tmean1, Tmean2, num_ord + real(kind=WP) :: opth, optv logical :: do_zero_flux -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ttf => tracers%data(tr_num)%values + ttfAB => tracers%data(tr_num)%valuesAB + opth = tracers%data(tr_num)%tra_adv_ph + optv = tracers%data(tr_num)%tra_adv_pv + fct_LO => tracers%work%fct_LO + adv_flux_ver => tracers%work%adv_flux_ver + adv_flux_hor => tracers%work%adv_flux_hor + edge_up_dn_grad => tracers%work%edge_up_dn_grad + nboundary_lay => tracers%work%nboundary_lay + fct_ttf_min => tracers%work%fct_ttf_min + fct_ttf_max => tracers%work%fct_ttf_max + fct_plus => tracers%work%fct_plus + fct_minus => tracers%work%fct_minus + dttf_h => tracers%work%del_ttf_advhoriz + dttf_v => tracers%work%del_ttf_advvert !___________________________________________________________________________ ! 
compute FCT horzontal and vertical low order solution as well as lw order ! part of antidiffusive flux - if (trim(tra_adv_lim)=='FCT') then + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then ! compute the low order upwind horizontal flux - ! init_zero=.true. : zero the horizontal flux before computation - ! init_zero=.false. : input flux will be substracted - call adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, adv_flux_hor, init_zero=.true.) - + ! o_init_zero=.true. : zero the horizontal flux before computation + ! o_init_zero=.false. : input flux will be substracted + call adv_tra_hor_upw1(vel, ttf, partit, mesh, adv_flux_hor, o_init_zero=.true.) ! update the LO solution for horizontal contribution - fct_LO=0.0_WP +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + fct_LO(:,n) = 0.0_WP + end do +!$OMP END PARALLEL DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(e, enodes, el, nl1, nu1, nl2, nu2, nu12, nl12, nz) do e=1, myDim_edge2D enodes=edges(:,e) el=edge_tri(:,e) @@ -101,127 +135,137 @@ subroutine do_oce_adv_tra(ttf, ttfAB, vel, w, wi, we, do_Xmoment, dttf_h, dttf_v if (nu2>0) nu12 = min(nu1,nu2) !!PS do nz=1, max(nl1, nl2) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED +#endif + do nz=nu12, nl12 + fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + end do + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) do nz=nu12, nl12 - fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e) - fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e) +#endif + fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e) end do +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED +#endif end do - +!$OMP END PARALLEL DO ! compute the low order upwind vertical flux (explicit part only) ! 
zero the input/output flux before computation - call adv_tra_ver_upw1(ttf, we, do_Xmoment, mesh, adv_flux_ver, init_zero=.true.) - + call adv_tra_ver_upw1(we, ttf, partit, mesh, adv_flux_ver, o_init_zero=.true.) ! update the LO solution for vertical contribution +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nu1, nl1, nz) do n=1, myDim_nod2D nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) !!PS do nz=1, nlevels_nod2D(n)-1 do nz= nu1, nl1-1 - fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/area(nz,n))/hnode_new(nz,n) + fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n) end do end do - - if (w_split) then !wvel/=wvel_e +!$OMP END PARALLEL DO + + if (dynamics%use_wsplit) then !wvel/=wvel_e ! update for implicit contribution (w_split option) - call adv_tra_vert_impl(fct_LO, wi, mesh) + call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh) ! compute the low order upwind vertical flux (full vertical velocity) ! zero the input/output flux before computation ! --> compute here low order part of vertical anti diffusive fluxes, ! has to be done on the full vertical velocity w - call adv_tra_ver_upw1(ttf, w, do_Xmoment, mesh, adv_flux_ver, init_zero=.true.) + call adv_tra_ver_upw1(w, ttf, partit, mesh, adv_flux_ver, o_init_zero=.true.) end if - - call exchange_nod(fct_LO) + call exchange_nod(fct_LO, partit) +!$OMP BARRIER end if - do_zero_flux=.true. - if (trim(tra_adv_lim)=='FCT') do_zero_flux=.false. - + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') do_zero_flux=.false. !___________________________________________________________________________ ! do horizontal tracer advection, in case of FCT high order solution - SELECT CASE(trim(tra_adv_hor)) + SELECT CASE(trim(tracers%data(tr_num)%tra_adv_hor)) CASE('MUSCL') - ! 
compute the untidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above) - call adv_tra_hor_muscl(ttfAB, uv, do_Xmoment, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) + ! compute the untidiffusive horizontal flux (o_init_zero=.false.: input is the LO horizontal flux computed above) + call adv_tra_hor_muscl(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, o_init_zero=do_zero_flux) CASE('MFCT') - call adv_tra_hor_mfct(ttfAB, uv, do_Xmoment, mesh, opth, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_mfct(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, o_init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_hor_upw1(ttfAB, uv, do_Xmoment, mesh, adv_flux_hor, init_zero=do_zero_flux) + call adv_tra_hor_upw1(vel, ttfAB, partit, mesh, adv_flux_hor, o_init_zero=do_zero_flux) CASE DEFAULT !unknown - if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tra_adv_hor), '! Check your namelists!' - call par_ex(1) + if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracers%data(tr_num)%tra_adv_hor), '! Check your namelists!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) END SELECT - - if (trim(tra_adv_lim)=='FCT') then + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then pwvel=>w else pwvel=>we end if - !___________________________________________________________________________ ! do vertical tracer advection, in case of FCT high order solution - SELECT CASE(trim(tra_adv_ver)) + SELECT CASE(trim(tracers%data(tr_num)%tra_adv_ver)) CASE('QR4C') - ! compute the untidiffusive vertical flux (init_zero=.false.:input is the LO vertical flux computed above) - call adv_tra_ver_qr4c (ttfAB, pwvel, do_Xmoment, mesh, optv, adv_flux_ver, init_zero=do_zero_flux) + ! 
compute the antidiffusive vertical flux (o_init_zero=.false.:input is the LO vertical flux computed above) + call adv_tra_ver_qr4c ( pwvel, ttfAB, partit, mesh, optv, adv_flux_ver, o_init_zero=do_zero_flux) CASE('CDIFF') - call adv_tra_ver_cdiff(ttfAB, pwvel, do_Xmoment, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_cdiff( pwvel, ttfAB, partit, mesh, adv_flux_ver, o_init_zero=do_zero_flux) CASE('PPM') - call adv_tra_vert_ppm (ttfAB, pwvel, do_Xmoment, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_vert_ppm(dt, pwvel, ttfAB, partit, mesh, adv_flux_ver, o_init_zero=do_zero_flux) CASE('UPW1') - call adv_tra_ver_upw1 (ttfAB, pwvel, do_Xmoment, mesh, adv_flux_ver, init_zero=do_zero_flux) + call adv_tra_ver_upw1 ( pwvel, ttfAB, partit, mesh, adv_flux_ver, o_init_zero=do_zero_flux) CASE DEFAULT !unknown - if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tra_adv_ver), '! Check your namelists!' - call par_ex(1) + if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) ! --> be aware the vertical implicite part in case without FCT is done in - ! oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, mesh) + ! oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, partit, mesh) ! for do_wimpl=.true. END SELECT - !___________________________________________________________________________ ! -!if (mype==0) then -! write(*,*) 'check new:' -! write(*,*) '1:', minval(fct_LO), maxval(fct_LO), sum(fct_LO) -! write(*,*) '2:', minval(adv_flux_hor), maxval(adv_flux_hor), sum(adv_flux_hor) -! 
write(*,*) '3:', minval(adv_flux_ver), maxval(adv_flux_ver), sum(adv_flux_ver) -!end if - if (trim(tra_adv_lim)=='FCT') then -!if (mype==0) write(*,*) 'before:', sum(abs(adv_flux_ver)), sum(abs(adv_flux_hor)) - call oce_tra_adv_fct(dttf_h, dttf_v, ttf, fct_LO, adv_flux_hor, adv_flux_ver, mesh) -!if (mype==0) write(*,*) 'after:', sum(abs(adv_flux_ver)), sum(abs(adv_flux_hor)) - call oce_tra_adv_flux2dtracer(dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, mesh, use_lo=.TRUE., ttf=ttf, lo=fct_LO) + if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then + !edge_up_dn_grad will be used as an auxiliary array here + call oce_tra_adv_fct(dt, ttf, fct_LO, adv_flux_hor, adv_flux_ver, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, edge_up_dn_grad, partit, mesh) + call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, partit, mesh, use_lo=.TRUE., ttf=ttf, lo=fct_LO) else - call oce_tra_adv_flux2dtracer(dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, mesh) + call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, partit, mesh) end if end subroutine do_oce_adv_tra ! ! 
!=============================================================================== -subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo, ttf, lo) +subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, mesh, use_lo, ttf, lo) use MOD_MESH - use O_MESH use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_CONFIG + USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(inout) :: flux_v(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: flux_v(mesh%nl, partit%myDim_nod2D) logical, optional :: use_lo - real(kind=WP), optional :: lo (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), optional :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) + real(kind=WP), optional :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), optional :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) integer :: n, nz, k, elem, enodes(3), num, el(2), nu12, nl12, nu1, nu2, nl1, nl2, edge -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! c. Update the solution ! 
Vertical +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, k, elem, enodes, num, el, nu12, nl12, nu1, nu2, nl1, nl2, edge) if (present(use_lo)) then if (use_lo) then +!$OMP DO do n=1, myDim_nod2d nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -230,19 +274,20 @@ subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo dttf_v(nz,n)=dttf_v(nz,n)-ttf(nz,n)*hnode(nz,n)+LO(nz,n)*hnode_new(nz,n) end do end do +!$OMP END DO end if end if - +!$OMP DO do n=1, myDim_nod2d nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) do nz=nu1,nl1-1 - dttf_v(nz,n)=dttf_v(nz,n) + (flux_v(nz,n)-flux_v(nz+1,n))*dt/area(nz,n) + dttf_v(nz,n)=dttf_v(nz,n) + (flux_v(nz,n)-flux_v(nz+1,n))*dt/areasvol(nz,n) end do end do - - +!$OMP END DO ! Horizontal +!$OMP DO do edge=1, myDim_edge2D enodes(1:2)=edges(:,edge) el=edge_tri(:,edge) @@ -260,10 +305,27 @@ subroutine oce_tra_adv_flux2dtracer(dttf_h, dttf_v, flux_h, flux_v, mesh, use_lo nu12 = nu1 if (nu2>0) nu12 = min(nu1,nu2) - !!PS do nz=1, max(nl1, nl2) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED +#endif + do nz=nu12, nl12 + dttf_h(nz,enodes(1))=dttf_h(nz,enodes(1))+flux_h(nz,edge)*dt/areasvol(nz,enodes(1)) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + end do + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) do nz=nu12, nl12 - dttf_h(nz,enodes(1))=dttf_h(nz,enodes(1))+flux_h(nz,edge)*dt/area(nz,enodes(1)) - dttf_h(nz,enodes(2))=dttf_h(nz,enodes(2))-flux_h(nz,edge)*dt/area(nz,enodes(2)) +#endif + dttf_h(nz,enodes(2))=dttf_h(nz,enodes(2))-flux_h(nz,edge)*dt/areasvol(nz,enodes(2)) end do +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED +#endif end do +!$OMP END DO +!$OMP END PARALLEL end subroutine oce_tra_adv_flux2dtracer diff --git a/src/oce_adv_tra_fct.F90 b/src/oce_adv_tra_fct.F90 index 4af76fdf7..acd772930 100644 
--- a/src/oce_adv_tra_fct.F90 +++ b/src/oce_adv_tra_fct.F90 @@ -1,61 +1,75 @@ module oce_adv_tra_fct_interfaces interface - subroutine oce_adv_tra_fct_init(mesh) + subroutine oce_adv_tra_fct_init(twork, partit, mesh) use MOD_MESH - use g_PARSUP - type(t_mesh), intent(in), target :: mesh + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit),intent(inout), target :: partit + type(t_tracer_work), intent(inout), target :: twork end subroutine - subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) + subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, partit, mesh) use MOD_MESH - use g_PARSUP - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: lo (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(inout) :: adf_v(mesh%nl, myDim_nod2D) + USE MOD_PARTIT + USE MOD_PARSUP + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: adf_v(mesh%nl, partit%myDim_nod2D) + real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, partit%myDim_nod2D) + real(kind=WP), intent(inout) :: fct_minus(mesh%nl, partit%myDim_nod2D) 
+ real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxiliary array end subroutine end interface end module ! ! !=============================================================================== -subroutine oce_adv_tra_fct_init(mesh) +subroutine oce_adv_tra_fct_init(twork, partit, mesh) use MOD_MESH - use O_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP implicit none - integer :: my_size - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + integer :: my_size + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer_work), intent(inout), target :: twork +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" my_size=myDim_nod2D+eDim_nod2D - allocate(fct_LO(nl-1, my_size)) ! Low-order solution - allocate(adv_flux_hor(nl-1,myDim_edge2D)) ! antidiffusive hor. contributions / from edges - allocate(adv_flux_ver(nl, myDim_nod2D)) ! antidiffusive ver. fluxes / from nodes + allocate(twork%fct_LO(nl-1, my_size)) ! Low-order solution + allocate(twork%adv_flux_hor(nl-1,partit%myDim_edge2D)) ! antidiffusive hor. contributions / from edges + allocate(twork%adv_flux_ver(nl, partit%myDim_nod2D)) ! antidiffusive ver. fluxes / from nodes - allocate(fct_ttf_max(nl-1, my_size),fct_ttf_min(nl-1, my_size)) - allocate(fct_plus(nl-1, my_size),fct_minus(nl-1, my_size)) + allocate(twork%fct_ttf_max(nl-1, my_size),twork%fct_ttf_min(nl-1, my_size)) + allocate(twork%fct_plus(nl-1, my_size), twork%fct_minus(nl-1, my_size)) ! 
Initialize with zeros: - fct_LO=0.0_WP - adv_flux_hor=0.0_WP - adv_flux_ver=0.0_WP - fct_ttf_max=0.0_WP - fct_ttf_min=0.0_WP - fct_plus=0.0_WP - fct_minus=0.0_WP + twork%fct_LO=0.0_WP + twork%adv_flux_hor=0.0_WP + twork%adv_flux_ver=0.0_WP + twork%fct_ttf_max=0.0_WP + twork%fct_ttf_min=0.0_WP + twork%fct_plus=0.0_WP + twork%fct_minus=0.0_WP if (mype==0) write(*,*) 'FCT is initialized' end subroutine oce_adv_tra_fct_init + ! ! !=============================================================================== -subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) +subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, partit, mesh) ! ! 3D Flux Corrected Transport scheme ! Limits antidiffusive fluxes==the difference in flux HO-LO @@ -63,28 +77,34 @@ subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) ! HO ==High-order (3rd/4th order gradient reconstruction method) ! Adds limited fluxes to the LO solution use MOD_MESH - use O_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_CONFIG - use g_comm_auto + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE g_comm_auto implicit none - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: lo (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, myDim_edge2D) - real(kind=WP), intent(inout) :: adf_v(mesh%nl, myDim_nod2D) + real(kind=WP), intent(in), target :: dt + type(t_mesh), intent(in), target :: mesh + type(t_partit),intent(inout), target :: partit + real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), 
intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: adf_v(mesh%nl, partit%myDim_nod2D) + real(kind=WP), intent(inout) :: fct_plus (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_minus(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array, let us use twork%edge_up_dn_grad(1:4, 1:NL-2, 1:partit%myDim_edge2D) to save space integer :: n, nz, k, elem, enodes(3), num, el(2), nl1, nl2, nu1, nu2, nl12, nu12, edge real(kind=WP) :: flux, ae,tvert_max(mesh%nl-1),tvert_min(mesh%nl-1) real(kind=WP) :: flux_eps=1e-16 real(kind=WP) :: bignumber=1e3 - integer :: vlimit=1 - -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, k, elem, enodes, num, el, nl1, nl2, nu1, nu2, nl12, nu12, edge, & +!$OMP flux, ae,tvert_max, tvert_min) ! -------------------------------------------------------------------------- ! ttf is the tracer field on step n ! del_ttf is the increment @@ -92,6 +112,7 @@ subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) ! -------------------------------------------------------------------------- !___________________________________________________________________________ ! a1. 
max, min between old solution and updated low-order solution per node +!$OMP DO do n=1,myDim_nod2D + edim_nod2d nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -99,119 +120,73 @@ subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) fct_ttf_max(nz,n)=max(LO(nz,n), ttf(nz,n)) fct_ttf_min(nz,n)=min(LO(nz,n), ttf(nz,n)) end do - end do - + end do +!$OMP END DO !___________________________________________________________________________ ! a2. Admissible increments on elements ! (only layers below the first and above the last layer) - ! look for max, min bounds for each element --> UV_rhs here auxilary array + ! look for max, min bounds for each element --> AUX here auxilary array +!$OMP DO do elem=1, myDim_elem2D enodes=elem2D_nodes(:,elem) nu1 = ulevels(elem) nl1 = nlevels(elem) do nz=nu1, nl1-1 - UV_rhs(1,nz,elem)=maxval(fct_ttf_max(nz,enodes)) - UV_rhs(2,nz,elem)=minval(fct_ttf_min(nz,enodes)) + AUX(1,nz,elem)=maxval(fct_ttf_max(nz,enodes)) + AUX(2,nz,elem)=minval(fct_ttf_min(nz,enodes)) end do if (nl1<=nl-1) then do nz=nl1,nl-1 - UV_rhs(1,nz,elem)=-bignumber - UV_rhs(2,nz,elem)= bignumber + AUX(1,nz,elem)=-bignumber + AUX(2,nz,elem)= bignumber end do endif end do ! --> do elem=1, myDim_elem2D - +!$OMP END DO !___________________________________________________________________________ ! a3. Bounds on clusters and admissible increments ! Vertical1: In this version we look at the bounds on the clusters ! above and below, which leaves wide bounds because typically ! vertical gradients are larger. - if(vlimit==1) then !Horizontal - do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - - !___________________________________________________________________ - do nz=nu1,nl1-1 - ! max,min horizontal bound in cluster around node n in every - ! vertical layer - ! nod_in_elem2D --> elem indices of which node n is surrounded - ! 
nod_in_elem2D_num --> max number of surrounded elem - tvert_max(nz)= maxval(UV_rhs(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(UV_rhs(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - end do - - !___________________________________________________________________ - ! calc max,min increment of surface layer with respect to low order - ! solution - fct_ttf_max(nu1,n)=tvert_max(nu1)-LO(nu1,n) - fct_ttf_min(nu1,n)=tvert_min(nu1)-LO(nu1,n) - - ! calc max,min increment from nz-1:nz+1 with respect to low order - ! solution at layer nz - do nz=nu1+1,nl1-2 - fct_ttf_max(nz,n)=maxval(tvert_max(nz-1:nz+1))-LO(nz,n) - fct_ttf_min(nz,n)=minval(tvert_min(nz-1:nz+1))-LO(nz,n) - end do - ! calc max,min increment of bottom layer -1 with respect to low order - ! solution - nz=nl1-1 - fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) - fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) - end do - end if - - !___________________________________________________________________________ - ! Vertical2: Similar to the version above, but the vertical bounds are more - ! local - if(vlimit==2) then - do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - do nz=nu1,nl1-1 - tvert_max(nz)= maxval(UV_rhs(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(UV_rhs(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - end do - do nz=nu1+1, nl1-2 - tvert_max(nz)=max(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) - tvert_min(nz)=min(tvert_min(nz),minval(fct_ttf_max(nz-1:nz+1,n))) - end do - do nz=nu1,nl1-1 - fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) - fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) - end do - end do - end if - - !___________________________________________________________________________ - ! Vertical3: Vertical bounds are taken into account only if they are narrower than the - ! 
horizontal ones - if(vlimit==3) then - do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - do nz=nu1, nl1-1 - tvert_max(nz)= maxval(UV_rhs(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - tvert_min(nz)= minval(UV_rhs(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) - end do - do nz=nu1+1, nl1-2 - tvert_max(nz)=min(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) - tvert_min(nz)=max(tvert_min(nz),minval(fct_ttf_max(nz-1:nz+1,n))) - end do - do nz=nu1, nl1-1 - fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) - fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) - end do - end do - end if - +!$OMP DO + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !___________________________________________________________________ + do nz=nu1,nl1-1 + ! max,min horizontal bound in cluster around node n in every + ! vertical layer + ! nod_in_elem2D --> elem indices of which node n is surrounded + ! nod_in_elem2D_num --> max number of surrounded elem + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + end do + !___________________________________________________________________ + ! calc max,min increment of surface layer with respect to low order + ! solution + fct_ttf_max(nu1,n)=tvert_max(nu1)-LO(nu1,n) + fct_ttf_min(nu1,n)=tvert_min(nu1)-LO(nu1,n) + ! calc max,min increment from nz-1:nz+1 with respect to low order + ! solution at layer nz + do nz=nu1+1,nl1-2 + fct_ttf_max(nz,n)=maxval(tvert_max(nz-1:nz+1))-LO(nz,n) + fct_ttf_min(nz,n)=minval(tvert_min(nz-1:nz+1))-LO(nz,n) + end do + ! calc max,min increment of bottom layer -1 with respect to low order + ! solution + nz=nl1-1 + fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) + fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) + end do +!$OMP END DO !___________________________________________________________________________ ! b1. Split positive and negative antidiffusive contributions ! 
--> sum all positive (fct_plus), negative (fct_minus) antidiffusive ! horizontal element and vertical node contribution to node n and layer nz ! see. R. Löhner et al. "finite element flux corrected transport (FEM-FCT) ! for the euler and navier stoke equation +!$OMP DO do n=1, myDim_nod2D nu1 = ulevels_nod2D(n) nl1 = nlevels_nod2D(n) @@ -220,67 +195,83 @@ subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) fct_minus(nz,n)=0._WP end do end do - +!$OMP END DO !Vertical +!$OMP DO do n=1, myDim_nod2D - nu1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - do nz=nu1,nl1-1 -! fct_plus(nz,n)=fct_plus(nz,n)+ & -! (max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) & -! /hnode(nz,n) -! fct_minus(nz,n)=fct_minus(nz,n)+ & -! (min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) & -! /hnode(nz,n) - fct_plus(nz,n) =fct_plus(nz,n) +(max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) - fct_minus(nz,n)=fct_minus(nz,n)+(min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) - end do + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 + fct_plus(nz,n) =fct_plus(nz,n) +(max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) + fct_minus(nz,n)=fct_minus(nz,n)+(min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) + end do end do - +!$OMP END DO + +!$OMP DO !Horizontal do edge=1, myDim_edge2D - enodes(1:2)=edges(:,edge) - el=edge_tri(:,edge) - nl1=nlevels(el(1))-1 - nu1=ulevels(el(1)) - nl2=0 - nu2=0 - if(el(2)>0) then - nl2=nlevels(el(2))-1 - nu2=ulevels(el(2)) - end if + enodes(1:2)=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + nu1=ulevels(el(1)) + nl2=0 + nu2=0 + if (el(2)>0) then + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + end if - nl12 = max(nl1,nl2) - nu12 = nu1 - if (nu2>0) nu12 = min(nu1,nu2) - - do nz=nu12, nl12 - fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) - fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) - fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + 
max(0.0_WP,-adf_h(nz,edge)) - fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) - end do - end do - + nl12 = max(nl1,nl2) + nu12 = nu1 + if (nu2>0) nu12 = min(nu1,nu2) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED +#endif + do nz=nu12, nl12 + fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) + fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + end do + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) + do nz=nu12, nl12 +#endif + fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) + fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) + end do +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED +#endif + end do +!$OMP END DO !___________________________________________________________________________ ! b2. Limiting factors +!$OMP DO do n=1,myDim_nod2D nu1=ulevels_nod2D(n) nl1=nlevels_nod2D(n) do nz=nu1,nl1-1 - flux=fct_plus(nz,n)*dt/area(nz,n)+flux_eps + flux=fct_plus(nz,n)*dt/areasvol(nz,n)+flux_eps fct_plus(nz,n)=min(1.0_WP,fct_ttf_max(nz,n)/flux) - flux=fct_minus(nz,n)*dt/area(nz,n)-flux_eps + flux=fct_minus(nz,n)*dt/areasvol(nz,n)-flux_eps fct_minus(nz,n)=min(1.0_WP,fct_ttf_min(nz,n)/flux) end do end do - +!$OMP END DO ! fct_minus and fct_plus must be known to neighbouring PE - call exchange_nod(fct_plus, fct_minus) - +!$OMP MASTER + call exchange_nod(fct_plus, fct_minus, partit) +!$OMP END MASTER +!$OMP BARRIER !___________________________________________________________________________ ! b3. 
Limiting !Vertical +!$OMP DO do n=1, myDim_nod2D nu1=ulevels_nod2D(n) nl1=nlevels_nod2D(n) @@ -311,10 +302,9 @@ subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) end do ! the bottom flux is always zero end do - - call exchange_nod_end ! fct_plus, fct_minus - +!$OMP END DO !Horizontal +!$OMP DO do edge=1, myDim_edge2D enodes(1:2)=edges(:,edge) el=edge_tri(:,edge) @@ -346,4 +336,6 @@ subroutine oce_tra_adv_fct(dttf_h, dttf_v, ttf, lo, adf_h, adf_v, mesh) adf_h(nz,edge)=ae*adf_h(nz,edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine oce_tra_adv_fct diff --git a/src/oce_adv_tra_hor.F90 b/src/oce_adv_tra_hor.F90 index 883c489a5..c7e209dfd 100644 --- a/src/oce_adv_tra_hor.F90 +++ b/src/oce_adv_tra_hor.F90 @@ -8,15 +8,17 @@ module oce_adv_tra_hor_interfaces ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) + subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH - use g_PARSUP - type(t_mesh), intent(in) , target :: mesh - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - integer, intent(in) :: do_Xmoment - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) - logical, optional :: init_zero + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + logical, optional :: o_init_zero end subroutine !=============================================================================== ! 
MUSCL @@ -25,67 +27,81 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) ! IF init_zero=.TRUE. : flux will be set to zero before computation ! IF init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero) + subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, o_init_zero) use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: o_init_zero end subroutine ! a not stable version of MUSCL (reconstruction in the vicinity of bottom topography is not upwind) ! 
it runs with FCT option only - subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero) + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, o_init_zero) use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit),intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: o_init_zero end subroutine end interface end module ! ! 
!=============================================================================== -subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) +subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH - use O_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_CONFIG + USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none - type(t_mesh), intent(in) , target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) - logical, optional :: init_zero + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: a, vflux integer :: el(2), enodes(2), nz, edge integer :: nu12, nl12, nl1, nl2, nu1, nu2 -#include "associate_mesh.h" - +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do edge=1, myDim_edge2D + flux(:,edge)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! The result is the low-order solution horizontal fluxes ! 
They are put into flux !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, & +!$OMP a, vflux, el, enodes, nz, nu12, nl12, nl1, nl2, nu1, nu2) +!$OMP DO do edge=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,edge) @@ -141,8 +157,8 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) !____________________________________________________________________ ! 1st. low order upwind solution flux(nz, edge)=-0.5_WP*( & - (ttf(nz, enodes(1))**do_Xmoment)*(vflux+abs(vflux))+ & - (ttf(nz, enodes(2))**do_Xmoment)*(vflux-abs(vflux)) & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)) & )-flux(nz, edge) end do @@ -159,8 +175,8 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) !___________________________________________________________ ! 1st. low order upwind solution flux(nz, edge)=-0.5_WP*( & - (ttf(nz, enodes(1))**do_Xmoment)*(vflux+abs(vflux))+ & - (ttf(nz, enodes(2))**do_Xmoment)*(vflux-abs(vflux)))-flux(nz, edge) + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) end do end if @@ -177,8 +193,8 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) +(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) flux(nz, edge)=-0.5_WP*( & - (ttf(nz, enodes(1))**do_Xmoment)*(vflux+abs(vflux))+ & - (ttf(nz, enodes(2))**do_Xmoment)*(vflux-abs(vflux)))-flux(nz, edge) + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) end do !_______________________________________________________________________ @@ -190,8 +206,8 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) !____________________________________________________________________ ! 1st. 
low order upwind solution flux(nz, edge)=-0.5_WP*( & - (ttf(nz, enodes(1))**do_Xmoment)*(vflux+abs(vflux))+ & - (ttf(nz, enodes(2))**do_Xmoment)*(vflux-abs(vflux)) & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)) & )-flux(nz, edge) end do @@ -204,30 +220,33 @@ subroutine adv_tra_hor_upw1(ttf, vel, do_Xmoment, mesh, flux, init_zero) !_______________________________________________________________ ! 1st. low order upwind solution flux(nz, edge)=-0.5_WP*( & - (ttf(nz, enodes(1))**do_Xmoment)*(vflux+abs(vflux))+ & - (ttf(nz, enodes(2))**do_Xmoment)*(vflux-abs(vflux)))-flux(nz, edge) + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_hor_upw1 ! ! !=============================================================================== -subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero) +subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, o_init_zero) use MOD_MESH - use O_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_CONFIG + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! 
num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: Tmean1, Tmean2, cHO real(kind=WP) :: c_lo(2) @@ -235,17 +254,29 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer integer :: el(2), enodes(2), nz, edge integer :: nu12, nl12, nl1, nl2, nu1, nu2 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do edge=1, myDim_edge2D + flux(:,edge)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! The result is the low-order solution horizontal fluxes ! They are put into flux !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, Tmean1, Tmean2, cHO, & +!$OMP c_lo, a, vflux, el, enodes, nz, nu12, nl12, nl1, nl2, nu1, nu2) +!$OMP DO do edge=1, myDim_edge2D ! 
local indice of nodes that span up edge ed enodes=edges(:,edge) @@ -311,8 +342,8 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer !____________________________________________________________________ ! volume flux across the segments vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -338,8 +369,8 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer !_______________________________________________________________ ! volume flux across the segments vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end if @@ -426,8 +457,8 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer !___________________________________________________________________ ! 
(1-num_ord) is done with 3rd order upwind - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -450,8 +481,8 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer !____________________________________________________________________ ! volume flux across the segments vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -474,47 +505,61 @@ subroutine adv_tra_hor_muscl(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zer !____________________________________________________________________ ! volume flux across the segments vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_hor_muscl ! ! 
!=============================================================================== -subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero) + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, o_init_zero) use MOD_MESH - use O_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_CONFIG + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto implicit none + type(t_partit),intent(inout), target :: partit type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: vel(2, mesh%nl-1, myDim_elem2D+eDim_elem2D) - real(kind=WP), intent(inout) :: flux(mesh%nl-1, myDim_edge2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 real(kind=WP) :: Tmean1, Tmean2, cHO real(kind=WP) :: a, vflux integer :: el(2), enodes(2), nz, edge integer :: nu12, nl12, nl1, nl2, nu1, nu2 -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. 
+ if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do edge=1, myDim_edge2D + flux(:,edge)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! The result is the low-order solution horizontal fluxes ! They are put into flux !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, Tmean1, Tmean2, cHO, & +!$OMP a, vflux, el, enodes, nz, nu12, nl12, nl1, nl2, nu1, nu2) +!$OMP DO do edge=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,edge) @@ -577,8 +622,8 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !____________________________________________________________________ ! volume flux across the segments vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -600,8 +645,8 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !___________________________________________________________________ ! 
volume flux across the segments vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end if @@ -684,8 +729,8 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !___________________________________________________________________ ! (1-num_ord) is done with 3rd order upwind - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -705,8 +750,8 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !____________________________________________________________________ ! 
volume flux across the segments vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do !_______________________________________________________________________ @@ -726,9 +771,11 @@ subroutine adv_tra_hor_mfct(ttf, vel, do_Xmoment, mesh, num_ord, flux, init_zero !____________________________________________________________________ ! volume flux across the segments vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - cHO=(vflux+abs(vflux))*(Tmean1**do_Xmoment) + (vflux-abs(vflux))*(Tmean2**do_Xmoment) - flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*( 0.5_WP*(Tmean1+Tmean2))**do_Xmoment-flux(nz,edge) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_hor_mfct diff --git a/src/oce_adv_tra_ver.F90 b/src/oce_adv_tra_ver.F90 index cd07d947f..1041a1607 100644 --- a/src/oce_adv_tra_ver.F90 +++ b/src/oce_adv_tra_ver.F90 @@ -2,109 +2,119 @@ module oce_adv_tra_ver_interfaces interface ! implicit 1st order upwind vertical advection with to solve for fct_LO ! 
updates the input tracer ttf - subroutine adv_tra_vert_impl(ttf, w, mesh) + subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) use mod_mesh - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(inout) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) end subroutine !=============================================================================== ! 1st order upwind (explicit) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! 
flux is not multiplied with dt - subroutine adv_tra_ver_upw1(ttf, w, do_Xmoment, mesh, flux, init_zero) + subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: o_init_zero end subroutine !=============================================================================== ! QR (4th order centerd) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! flux is not multiplied with dt - subroutine adv_tra_ver_qr4c(ttf, w, do_Xmoment, mesh, num_ord, flux, init_zero) + subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, o_init_zero) use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! 
num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: o_init_zero end subroutine !=============================================================================== ! Vertical advection with PPM reconstruction (5th order) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! 
flux is not multiplied with dt - subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) + subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - integer :: n, nz, nl1 - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP) :: tvert(mesh%nl), tv - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: o_init_zero end subroutine ! central difference reconstruction (2nd order, use only with FCT) ! returns flux given at vertical interfaces of scalar volumes -! IF init_zero=.TRUE. : flux will be set to zero before computation -! IF init_zero=.FALSE. : flux=flux-input flux +! IF o_init_zero=.TRUE. : flux will be set to zero before computation +! IF o_init_zero=.FALSE. : flux=flux-input flux ! 
flux is not multiplied with dt - subroutine adv_tra_ver_cdiff(ttf, w, do_Xmoment, mesh, flux, init_zero) + subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh integer :: n, nz, nl1 - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP) :: tvert(mesh%nl), tv - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: o_init_zero end subroutine end interface end module !=============================================================================== -subroutine adv_tra_vert_impl(ttf, w, mesh) +subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) use MOD_MESH - use O_MESH - use o_PARAM - use o_ARRAYS - use i_ARRAYS - use g_PARSUP - use g_CONFIG - use g_forcing_arrays - use o_mixing_KPP_mod !for ghats _GO_ - + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + use g_comm_auto + implicit none + real(kind=WP), intent(in) , target :: dt + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in) , target :: mesh - real(kind=WP), intent(inout) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) + real(kind=WP), intent(inout) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) - 
integer :: nz, n, nzmax, nzmin, tr_num + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) + integer :: nz, n, nzmax, nzmin real(kind=WP) :: m, zinv, dt_inv, dz real(kind=WP) :: c1, v_adv -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" dt_inv=1.0_WP/dt - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(a, b, c, tr, cp, tp, n, nz, nzmax, nzmin, m, zinv, dz, c1, v_adv) +!$OMP DO !___________________________________________________________________________ ! loop over local nodes do n=1,myDim_nod2D @@ -146,19 +156,28 @@ subroutine adv_tra_vert_impl(ttf, w, mesh) ! 1/dz(nz) zinv=1.0_WP*dt ! no .../(zbar(1)-zbar(2)) because of ALE + !!PS a(nz)=0.0_WP + !!PS v_adv=zinv*areasvol(nz+1,n)/areasvol(nz,n) + !!PS b(nz)= hnode_new(nz,n)+W(nz, n)*zinv-min(0._WP, W(nz+1, n))*v_adv + !!PS c(nz)=-max(0._WP, W(nz+1, n))*v_adv + a(nz)=0.0_WP - v_adv=zinv*area(nz+1,n)/area(nz,n) - b(nz)= hnode_new(nz,n)+W(nz, n)*zinv-min(0._WP, W(nz+1, n))*v_adv + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + b(nz)= hnode_new(nz,n)+W(nz, n)*v_adv + + v_adv=zinv*area(nz+1,n)/areasvol(nz,n) + b(nz)= b(nz)-min(0._WP, W(nz+1, n))*v_adv c(nz)=-max(0._WP, W(nz+1, n))*v_adv !_______________________________________________________________________ ! Regular part of coefficients: --> 2nd...nl-2 layer do nz=nzmin+1, nzmax-2 ! update from the vertical advection - a(nz)=min(0._WP, W(nz, n))*zinv - b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*zinv + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + a(nz)=min(0._WP, W(nz, n))*v_adv + b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*v_adv - v_adv=zinv*area(nz+1,n)/area(nz,n) + v_adv=zinv*area(nz+1,n)/areasvol(nz,n) b(nz)=b(nz)-min(0._WP, W(nz+1, n))*v_adv c(nz)= -max(0._WP, W(nz+1, n))*v_adv end do ! --> do nz=2, nzmax-2 @@ -167,8 +186,12 @@ subroutine adv_tra_vert_impl(ttf, w, mesh) ! Regular part of coefficients: --> nl-1 layer nz=nzmax-1 ! 
update from the vertical advection - a(nz)= min(0._WP, W(nz, n))*zinv - b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*zinv + !!PS a(nz)= min(0._WP, W(nz, n))*zinv + !!PS b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*zinv + !!PS c(nz)=0.0_WP + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + a(nz)= min(0._WP, W(nz, n))*v_adv + b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*v_adv c(nz)=0.0_WP !_______________________________________________________________________ @@ -211,33 +234,47 @@ subroutine adv_tra_vert_impl(ttf, w, mesh) ttf(nz,n)=ttf(nz,n)+tr(nz) end do end do ! --> do n=1,myDim_nod2D +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_vert_impl ! ! !=============================================================================== -subroutine adv_tra_ver_upw1(ttf, w, do_Xmoment, mesh, flux, init_zero) - use g_config +subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_forcing_arrays + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + use g_comm_auto + implicit none + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP) :: tvert(mesh%nl) integer :: n, nz, nzmax, nzmin - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) - logical, optional :: init_zero -#include "associate_mesh.h" + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: o_init_zero + logical :: l_init_zero +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - if 
(present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO + end if +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert, n, nz, nzmax, nzmin) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ @@ -262,41 +299,54 @@ subroutine adv_tra_ver_upw1(ttf, w, do_Xmoment, mesh, flux, init_zero) ! vert. flux at remaining levels do nz=nzmin+1,nzmax-1 flux(nz,n)=-0.5*( & - (ttf(nz ,n)**do_Xmoment)*(W(nz,n)+abs(W(nz,n)))+ & - (ttf(nz-1,n)**do_Xmoment)*(W(nz,n)-abs(W(nz,n))))*area(nz,n)-flux(nz,n) + ttf(nz ,n)*(W(nz,n)+abs(W(nz,n)))+ & + ttf(nz-1,n)*(W(nz,n)-abs(W(nz,n))))*area(nz,n)-flux(nz,n) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_ver_upw1 ! ! !=============================================================================== -subroutine adv_tra_ver_qr4c(ttf, w, do_Xmoment, mesh, num_ord, flux, init_zero) - use g_config +subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, o_init_zero) use MOD_MESH use o_ARRAYS use o_PARAM - use g_PARSUP - use g_forcing_arrays + USE MOD_PARTIT + USE MOD_PARSUP implicit none + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport real(kind=WP), intent(in) :: num_ord ! 
num_ord is the fraction of fourth-order contribution in the solution - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: tvert(mesh%nl) integer :: n, nz, nzmax, nzmin real(kind=WP) :: Tmean, Tmean1, Tmean2 real(kind=WP) :: qc, qu, qd -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO end if +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert,n, nz, nzmax, nzmin, Tmean, Tmean1, Tmean2, qc, qu,qd) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ @@ -336,40 +386,51 @@ subroutine adv_tra_ver_qr4c(ttf, w, do_Xmoment, mesh, num_ord, flux, init_zero) Tmean1=ttf(nz ,n)+(2*qc+qu)*(zbar_3d_n(nz,n)-Z_3d_n(nz ,n))/3.0_WP Tmean2=ttf(nz-1,n)+(2*qc+qd)*(zbar_3d_n(nz,n)-Z_3d_n(nz-1,n))/3.0_WP - Tmean =(W(nz,n)+abs(W(nz,n)))*(Tmean1**do_Xmoment)+(W(nz,n)-abs(W(nz,n)))*(Tmean2**do_Xmoment) - ! 
flux(nz,n)=-0.5_WP*(num_ord*(Tmean1+Tmean2)*W(nz,n)+(1.0_WP-num_ord)*Tmean)*area(nz,n)-flux(nz,n) - flux(nz,n)=(-0.5_WP*(1.0_WP-num_ord)*Tmean - num_ord*((0.5_WP*(Tmean1+Tmean2))**do_Xmoment)*W(nz,n))*area(nz,n)-flux(nz,n) + Tmean =(W(nz,n)+abs(W(nz,n)))*Tmean1+(W(nz,n)-abs(W(nz,n)))*Tmean2 + flux(nz,n)=(-0.5_WP*(1.0_WP-num_ord)*Tmean - num_ord*(0.5_WP*(Tmean1+Tmean2))*W(nz,n))*area(nz,n)-flux(nz,n) end do end do +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_ver_qr4c ! ! !=============================================================================== -subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) - use g_config +subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_forcing_arrays + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + use g_comm_auto implicit none + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in) , target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport - real(kind=WP), intent(in) :: ttf (mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: o_init_zero + logical :: l_init_zero real(kind=WP) :: tvert(mesh%nl), tv(mesh%nl), aL, aR, aj, x real(kind=WP) :: dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1 integer :: n, nz, nzmax, nzmin - integer :: overshoot_counter, counter +! 
integer :: overshoot_counter, counter -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO end if ! -------------------------------------------------------------------------- @@ -380,8 +441,10 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) ! non-uniformity into account, but this is more cumbersome. This is the version for AB ! time stepping ! -------------------------------------------------------------------------- - overshoot_counter=0 - counter =0 +! overshoot_counter=0 +! counter =0 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tvert, tv, aL, aR, aj, x, dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1, n, nz, nzmax, nzmin) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ !Interpolate to zbar...depth levels --> all quantities (tracer ...) are @@ -390,18 +453,16 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) nzmax=nlevels_nod2D(n) nzmin=ulevels_nod2D(n) - ! tracer at surface layer - tv(nzmin)=ttf(nzmin,n) - - ! tracer at surface+1 layer - ! tv(2)=-ttf(1,n)*min(sign(1.0, W(2,n)), 0._WP)+ttf(2,n)*max(sign(1.0, W(2,n)), 0._WP) - tv(nzmin+1)=0.5*(ttf(nzmin,n)+ttf(nzmin+1,n)) - - ! tacer at bottom-1 layer - !tv(nzmax-1)=-ttf(nzmax-2,n)*min(sign(1.0, W(nzmax-1,n)), 0._WP)+ttf(nzmax-1,n)*max(sign(1.0, W(nzmax-1,n)), 0._WP) - tv(nzmax-1)=0.5_WP*(ttf(nzmax-2,n)+ttf(nzmax-1,n)) - - ! tracer at bottom layer + ! tracer at surface level + tv(nzmin)=ttf(nzmin,n) + ! tracer at surface+1 level +! tv(2)=-ttf(1,n)*min(sign(1.0, W(2,n)), 0._WP)+ttf(2,n)*max(sign(1.0, W(2,n)), 0._WP) +! 
tv(3)=-ttf(2,n)*min(sign(1.0, W(3,n)), 0._WP)+ttf(3,n)*max(sign(1.0, W(3,n)), 0._WP) + tv(nzmin+1)=0.5*(ttf(nzmin, n)+ttf(nzmin+1,n)) + ! tacer at bottom-1 level + tv(nzmax-1)=-ttf(nzmax-2,n)*min(sign(1.0_wp, W(nzmax-1,n)), 0._WP)+ttf(nzmax-1,n)*max(sign(1.0_wp, W(nzmax-1,n)), 0._WP) +! tv(nzmax-1)=0.5_WP*(ttf(nzmax-2,n)+ttf(nzmax-1,n)) + ! tracer at bottom level tv(nzmax)=ttf(nzmax-1,n) !_______________________________________________________________________ @@ -409,7 +470,7 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) ! see Colella and Woodward, JCP, 1984, 174-201 --> equation (1.9) ! loop over layers (segments) !!PS do nz=3, nzmax-3 - do nz=nzmin+2, nzmax-2 + do nz=nzmin+1, nzmax-3 !___________________________________________________________________ ! for uniform spaced vertical grids --> piecewise parabolic method (ppm) ! equation (1.9) @@ -419,10 +480,10 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) ! for non-uniformity spaced vertical grids --> piecewise parabolic ! method (ppm) see see Colella and Woodward, JCP, 1984, 174-201 ! --> full equation (1.6), (1.7) and (1.8) - dzjm1 = hnode_new(nz-2,n) - dzj = hnode_new(nz-1,n) - dzjp1 = hnode_new(nz,n) - dzjp2 = hnode_new(nz+1,n) + dzjm1 = hnode_new(nz-1,n) + dzj = hnode_new(nz ,n) + dzjp1 = hnode_new(nz+1,n) + dzjp2 = hnode_new(nz+2,n) ! Be carefull here vertical operation have to be done on NEW vertical mesh !!! !___________________________________________________________________ @@ -482,12 +543,12 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) ! loop over layers (segments) do nz=nzmin, nzmax-1 if ((W(nz,n)<=0._WP) .AND. (W(nz+1,n)>=0._WP)) CYCLE - counter=counter+1 + !counter=counter+1 aL=tv(nz) aR=tv(nz+1) if ((aR-ttf(nz, n))*(ttf(nz, n)-aL)<=0._WP) then ! write(*,*) aL, ttf(nz, n), aR - overshoot_counter=overshoot_counter+1 + ! 
overshoot_counter=overshoot_counter+1 aL =ttf(nz, n) aR =ttf(nz, n) end if @@ -504,54 +565,67 @@ subroutine adv_tra_vert_ppm(ttf, w, do_Xmoment, mesh, flux, init_zero) if (W(nz,n)>0._WP) then x=min(W(nz,n)*dt/dzj, 1._WP) tvert(nz )=(-aL-0.5_WP*x*(aR-aL+(1._WP-2._WP/3._WP*x)*aj)) - tvert(nz )=( tvert(nz)**do_Xmoment ) ! compute 2nd moment for DVD + tvert(nz )=tvert(nz) ! compute 2nd moment for DVD tvert(nz )=tvert(nz)*area(nz,n)*W(nz,n) end if if (W(nz+1,n)<0._WP) then x=min(-W(nz+1,n)*dt/dzj, 1._WP) tvert(nz+1)=(-aR+0.5_WP*x*(aR-aL-(1._WP-2._WP/3._WP*x)*aj)) - tvert(nz+1)=( tvert(nz+1)**do_Xmoment ) ! compute 2nd moment for DVD + tvert(nz+1)=tvert(nz+1) ! compute 2nd moment for DVD tvert(nz+1)=tvert(nz+1)*area(nz+1,n)*W(nz+1,n) end if end do !_______________________________________________________________________ ! Surface flux - tvert(nzmin)= -( tv(nzmin)**do_Xmoment )*W(nzmin,n)*area(nzmin,n) + tvert(nzmin)= -tv(nzmin)*W(nzmin,n)*area(nzmin,n) ! Zero bottom flux tvert(nzmax)=0.0_WP flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) end do ! --> do n=1, myDim_nod2D ! if (mype==0) write(*,*) 'PPM overshoot statistics:', real(overshoot_counter)/real(counter) +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_vert_ppm ! ! 
!=============================================================================== -subroutine adv_tra_ver_cdiff(ttf, w, do_Xmoment, mesh, flux, init_zero) - use g_config +subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, o_init_zero) use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_forcing_arrays + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + use g_comm_auto implicit none + type(t_partit),intent(in), target :: partit type(t_mesh), intent(in), target :: mesh - integer, intent(in) :: do_Xmoment !--> = [1,2] compute 1st & 2nd moment of tracer transport - real(kind=WP), intent(in) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(in) :: W (mesh%nl, myDim_nod2D+eDim_nod2D) - real(kind=WP), intent(inout) :: flux(mesh%nl, myDim_nod2D) - logical, optional :: init_zero + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: o_init_zero + logical :: l_init_zero integer :: n, nz, nzmax, nzmin real(kind=WP) :: tvert(mesh%nl), tv -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - if (present(init_zero))then - if (init_zero) flux=0.0_WP - else - flux=0.0_WP + l_init_zero=.true. + if (present(o_init_zero)) then + l_init_zero=o_init_zero + end if + if (l_init_zero) then +!$OMP PARALLEL DO + do n=1, myDim_nod2D + flux(:, n)=0.0_WP + end do +!$OMP END PARALLEL DO end if +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, tv, tvert) +!$OMP DO do n=1, myDim_nod2D !_______________________________________________________________________ nzmax=nlevels_nod2D(n)-1 @@ -559,7 +633,7 @@ subroutine adv_tra_ver_cdiff(ttf, w, do_Xmoment, mesh, flux, init_zero) !_______________________________________________________________________ ! 
Surface flux - tvert(nzmin)= -W(nzmin,n)*(ttf(nzmin,n)**do_Xmoment)*area(nzmin,n) + tvert(nzmin)= -W(nzmin,n)*ttf(nzmin,n)*area(nzmin,n) !_______________________________________________________________________ ! Zero bottom flux @@ -569,11 +643,12 @@ subroutine adv_tra_ver_cdiff(ttf, w, do_Xmoment, mesh, flux, init_zero) ! Other levels do nz=nzmin+1, nzmax tv=0.5_WP*(ttf(nz-1,n)+ttf(nz,n)) - tv=tv**do_Xmoment tvert(nz)= -tv*W(nz,n)*area(nz,n) end do !_______________________________________________________________________ flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) end do ! --> do n=1, myDim_nod2D +!$OMP END DO +!$OMP END PARALLEL end subroutine adv_tra_ver_cdiff diff --git a/src/oce_ale.F90 b/src/oce_ale.F90 index 12e1390d0..386a7caca 100644 --- a/src/oce_ale.F90 +++ b/src/oce_ale.F90 @@ -1,62 +1,151 @@ module oce_ale_interfaces - interface - subroutine init_bottom_elem_thickness(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine init_bottom_node_thickness(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine init_surface_elem_depth(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine init_surface_node_depth(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine impl_vert_visc_ale(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine update_stiff_mat_ale(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine compute_ssh_rhs_ale(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine solve_ssh_ale(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine compute_hbar_ale(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - - subroutine vert_vel_ale(mesh) - use mod_mesh - type(t_mesh), 
intent(in) , target :: mesh - end subroutine - - subroutine update_thickness_ale(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface + interface + subroutine init_bottom_elem_thickness(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine init_bottom_node_thickness(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine init_surface_elem_depth(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine init_surface_node_depth(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine impl_vert_visc_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine update_stiff_mat_ale(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine solve_ssh_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: 
dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine compute_hbar_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine vert_vel_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + + subroutine update_thickness_ale(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + end interface +end module + +module init_ale_interface + interface + subroutine init_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + end interface +end module + +module init_thickness_ale_interface + interface + subroutine init_thickness_ale(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + end interface end module +module oce_timestep_ale_interface + interface + subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + use MOD_ICE + integer , intent(in) :: n + type(t_dyn) , intent(inout), target :: dynamics + type(t_ice), intent(inout), target :: ice + type(t_tracer), intent(inout), 
target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + end subroutine + end interface +end module ! CONTENT: ! ------------ ! subroutine ale_init @@ -79,58 +168,81 @@ subroutine update_thickness_ale(mesh) ! !=============================================================================== ! allocate & initialise arrays for Arbitrary-Langrangian-Eularian (ALE) method -subroutine init_ale(mesh) +subroutine init_ale(dynamics, partit, mesh) USE o_PARAM USE MOD_MESH - USE g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN USE o_ARRAYS USE g_config, only: which_ale, use_cavity, use_partial_cell USE g_forcing_param, only: use_virt_salt use oce_ale_interfaces Implicit NONE - - integer :: n, nzmax, nzmin, elnodes(3), elem - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: n, nzmax, nzmin, elnodes(3), elem + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + !___allocate________________________________________________________________ ! hnode and hnode_new: layer thicknesses at nodes. - allocate(hnode(1:nl-1, myDim_nod2D+eDim_nod2D)) - allocate(hnode_new(1:nl-1, myDim_nod2D+eDim_nod2D)) + allocate(mesh%hnode(1:nl-1, myDim_nod2D+eDim_nod2D)) + allocate(mesh%hnode_new(1:nl-1, myDim_nod2D+eDim_nod2D)) ! ssh_rhs_old: auxiliary array to store an intermediate part of the rhs computations. - allocate(ssh_rhs_old(myDim_nod2D+eDim_nod2D)) + allocate(dynamics%ssh_rhs_old(myDim_nod2D+eDim_nod2D)) + dynamics%ssh_rhs_old = 0.0_WP ! 
hbar, hbar_old: correspond to the elevation, but on semi-integer time steps. - allocate(hbar(myDim_nod2D+eDim_nod2D)) - allocate(hbar_old(myDim_nod2D+eDim_nod2D)) + allocate(mesh%hbar(myDim_nod2D+eDim_nod2D)) + allocate(mesh%hbar_old(myDim_nod2D+eDim_nod2D)) ! helem: layer thickness at elements. It is interpolated from hnode. - allocate(helem(1:nl-1, myDim_elem2D)) + allocate(mesh%helem(1:nl-1, myDim_elem2D)) ! dhe: The increment of total fluid depth on elements. It is used to update the matrix ! of the ssh operator. - allocate(dhe(myDim_elem2D)) + allocate(mesh%dhe(myDim_elem2D)) - ! zbar_n: depth of layers due to ale thinkness variactions at ervery node n - allocate(zbar_n(nl)) - allocate(zbar_3d_n(nl,myDim_nod2D+eDim_nod2D)) + allocate(mesh%zbar_3d_n(nl,myDim_nod2D+eDim_nod2D)) ! Z_n: mid depth of layers due to ale thinkness variactions at ervery node n - allocate(Z_n(nl-1)) - allocate(Z_3d_n(nl-1,myDim_nod2D+eDim_nod2D)) + allocate(mesh%Z_3d_n(nl-1,myDim_nod2D+eDim_nod2D)) ! bottom_elem_tickness: changed bottom layer thinkness due to partial cells - allocate(bottom_elem_thickness(myDim_elem2D)) - allocate(zbar_e_bot(myDim_elem2D+eDim_elem2D)) - allocate(zbar_e_srf(myDim_elem2D+eDim_elem2D)) + allocate(mesh%bottom_elem_thickness(myDim_elem2D)) + allocate(mesh%zbar_e_bot(myDim_elem2D+eDim_elem2D)) + allocate(mesh%zbar_e_srf(myDim_elem2D+eDim_elem2D)) ! also change bottom thickness at nodes due to partial cell --> bottom - ! thickness at nodes is the volume weighted mean of sorounding elemental + ! thickness at nodes is the volume weighted mean of sorounding elemental ! thicknesses - allocate(bottom_node_thickness(myDim_nod2D+eDim_nod2D)) - allocate(zbar_n_bot(myDim_nod2D+eDim_nod2D)) - allocate(zbar_n_srf(myDim_nod2D+eDim_nod2D)) - + allocate(mesh%bottom_node_thickness(myDim_nod2D+eDim_nod2D)) + allocate(mesh%zbar_n_bot(myDim_nod2D+eDim_nod2D)) + allocate(mesh%zbar_n_srf(myDim_nod2D+eDim_nod2D)) + + ! 
reassociate after the allocation (no pointer exists before) + hnode(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode(:,:) + hnode_new(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%hnode_new(:,:) + zbar_3d_n(1:mesh%nl, 1:myDim_nod2D+eDim_nod2D) => mesh%zbar_3d_n(:,:) + Z_3d_n(1:mesh%nl-1, 1:myDim_nod2D+eDim_nod2D) => mesh%Z_3d_n(:,:) + helem(1:mesh%nl-1, 1:myDim_elem2D) => mesh%helem(:,:) + bottom_elem_thickness(1:myDim_elem2D) => mesh%bottom_elem_thickness(:) + bottom_node_thickness(1:myDim_nod2D+eDim_nod2D) => mesh%bottom_node_thickness(:) + dhe(1:myDim_elem2D) => mesh%dhe(:) + hbar(1:myDim_nod2D+eDim_nod2D) => mesh%hbar(:) + hbar_old(1:myDim_nod2D+eDim_nod2D) => mesh%hbar_old(:) + zbar_n_bot(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_bot(:) + zbar_e_bot(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_bot(:) + zbar_n_srf(1:myDim_nod2D+eDim_nod2D) => mesh%zbar_n_srf(:) + zbar_e_srf(1:myDim_elem2D+eDim_elem2D) => mesh%zbar_e_srf(:) !___initialize______________________________________________________________ hbar = 0.0_WP hbar_old = 0.0_WP @@ -143,14 +255,14 @@ subroutine init_ale(mesh) ! of partial cell bootom layer zbar_n_bot = 0.0 zbar_e_bot = 0.0 - call init_bottom_elem_thickness(mesh) - call init_bottom_node_thickness(mesh) + call init_bottom_elem_thickness(partit, mesh) + call init_bottom_node_thickness(partit, mesh) ! compute depth of partial cell ocean-cavity interface zbar_n_srf = zbar(1) zbar_e_srf = zbar(1) - call init_surface_elem_depth(mesh) - call init_surface_node_depth(mesh) + call init_surface_elem_depth(partit, mesh) + call init_surface_node_depth(partit, mesh) !___________________________________________________________________________ ! initialise 3d field of depth levels and mid-depth levels @@ -195,20 +307,27 @@ end subroutine init_ale ! ! 
!=============================================================================== -subroutine init_bottom_elem_thickness(mesh) +subroutine init_bottom_elem_thickness(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS - use g_config,only: use_partial_cell, partial_cell_thresh + use g_config,only: use_partial_cell, partial_cell_thresh, use_depthonelem use g_comm_auto use g_support implicit none - + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: elem, elnodes(3), nle real(kind=WP) :: dd - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! If we use partial cells, the thickness of bottom cell is adjusted. @@ -217,10 +336,15 @@ subroutine init_bottom_elem_thickness(mesh) if(use_partial_cell) then !Adjust the thickness of elemental bottom cells do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - ! elemental topographic depth - dd=sum(depth(elnodes))/3.0_WP + !___________________________________________________________________ + if (use_depthonelem) then + dd=depth(elem) + else + elnodes=elem2D_nodes(:,elem) + ! elemental topographic depth + dd=sum(depth(elnodes))/3.0_WP + end if ! number of full depth levels at elem nle=nlevels(elem) @@ -307,27 +431,34 @@ subroutine init_bottom_elem_thickness(mesh) end if !___________________________________________________________________________ - call exchange_elem(zbar_e_bot) + call exchange_elem(zbar_e_bot, partit) end subroutine init_bottom_elem_thickness ! ! 
!=============================================================================== -subroutine init_bottom_node_thickness(mesh) +subroutine init_bottom_node_thickness(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config,only: use_partial_cell use g_comm_auto use g_support implicit none - + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: node, nln, elem, elemi, nelem real(kind=WP) :: dd real(kind=WP) :: hnbot, tvol - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! If we use partial cells, the thickness of bottom cell is adjusted. @@ -412,28 +543,34 @@ subroutine init_bottom_node_thickness(mesh) end if ! --> if(use_partial_cell) then !___________________________________________________________________________ - call exchange_nod(zbar_n_bot) - call exchange_nod(bottom_node_thickness) + call exchange_nod(zbar_n_bot, partit) + call exchange_nod(bottom_node_thickness, partit) end subroutine init_bottom_node_thickness ! ! 
!=============================================================================== -subroutine init_surface_elem_depth(mesh) +subroutine init_surface_elem_depth(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS - use g_config,only: use_cavity, use_cavity_partial_cell, cavity_partial_cell_thresh + use g_config,only: use_cavity, use_cavity_partial_cell, cavity_partial_cell_thresh, use_cavityonelem use g_comm_auto use g_support implicit none - + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: elem, elnodes(3), ule real(kind=WP) :: dd - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (use_cavity) then @@ -447,15 +584,20 @@ subroutine init_surface_elem_depth(mesh) ule=ulevels(elem) if (ule==1) cycle - !___________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - !___________________________________________________________________ ! elemental cavity depth if (use_cavity_partial_cell) then - dd=sum(cavity_depth(elnodes))/3.0_WP + + !_______________________________________________________________ + if (use_cavityonelem) then + dd=cavity_depth(elem) + else + elnodes=elem2D_nodes(:,elem) + ! elemental cavity depth + dd=sum(cavity_depth(elnodes))/3.0_WP + end if - !___________________________________________________________________ + !_______________________________________________________________ ! Only apply Surface Partial Cells when the initial full cell surface ! 
layer thickness is above the treshhold cavity_partial_cell_thresh if (zbar(ule)-zbar(ule+1)<=cavity_partial_cell_thresh) then @@ -486,27 +628,35 @@ subroutine init_surface_elem_depth(mesh) end do ! --> do elem=1, myDim_elem2D !_______________________________________________________________________ - call exchange_elem(zbar_e_srf) + call exchange_elem(zbar_e_srf, partit) end if end subroutine init_surface_elem_depth ! ! !=============================================================================== -subroutine init_surface_node_depth(mesh) +subroutine init_surface_node_depth(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config,only: use_cavity, use_cavity_partial_cell use g_comm_auto use g_support implicit none - + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: node, uln, nelem, elemi real(kind=WP) :: dd - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + !___________________________________________________________________________ if (use_cavity) then !___________________________________________________________________________ ! If we use partial cells and cavity, the thickness of surface cell is adjusted. @@ -539,14 +689,14 @@ subroutine init_surface_node_depth(mesh) end do ! --> do node=1, myDim_nod2D+eDim_nod2D !_______________________________________________________________________ - call exchange_nod(zbar_n_srf) + call exchange_nod(zbar_n_srf, partit) end if end subroutine init_surface_node_depth ! ! !=============================================================================== ! 
initialize thickness arrays based on the current hbar -subroutine init_thickness_ale(mesh) +subroutine init_thickness_ale(dynamics, partit, mesh) ! For z-star case: we stretch scalar thicknesses (nodal) ! through nlevels_nod2D_min -2 layers. Layer nlevels_nod2D_min-1 ! should not be touched if partial cell is implemented (it is). @@ -555,15 +705,27 @@ subroutine init_thickness_ale(mesh) use g_config,only: dt, which_ale use o_PARAM use MOD_MESH - use O_MESH - use g_PARSUP - use o_ARRAYS + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN implicit none - integer :: n, nz, elem, elnodes(3), nzmin, nzmax + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: n, nz, elem, elnodes(3), nzmin, nzmax real(kind=WP) :: dd - type(t_mesh), intent(in) , target :: mesh + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: ssh_rhs_old, eta_n +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ssh_rhs_old=>dynamics%ssh_rhs_old(:) + eta_n =>dynamics%eta_n(:) -#include "associate_mesh.h" + !___________________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' @@ -572,7 +734,10 @@ subroutine init_thickness_ale(mesh) end if !___________________________________________________________________________ ! Fill in ssh_rhs_old - ssh_rhs_old=(hbar-hbar_old)*area(1,:)/dt + !!PS ssh_rhs_old=(hbar-hbar_old)*area(1,:)/dt + do n=1,myDim_nod2D+eDim_nod2D + ssh_rhs_old(n)=(hbar(n)-hbar_old(n))*areasvol(ulevels_nod2D(n),n)/dt ! --> TEST_cavity + end do ! 
-->see equation (14) FESOM2:from finite elements to finie volume eta_n=alpha*hbar_old+(1.0_WP-alpha)*hbar @@ -780,31 +945,39 @@ subroutine init_thickness_ale(mesh) write(*,*) write(*,*) '____________________________________________________________' write(*,*) 'The vertical ALE discretisation ', which_ale,' is currently not supported!!!' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end if endif !___________________________________________________________________________ hnode_new=hnode ! Should be initialized, because only variable part is updated. + !!PS call check_total_volume(partit, mesh) + end subroutine init_thickness_ale ! ! !=============================================================================== ! update thickness arrays based on the current hbar -subroutine update_thickness_ale(mesh) +subroutine update_thickness_ale(partit, mesh) use o_PARAM use MOD_MESH - use O_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode implicit none + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: n, nz, elem, elnodes(3),nzmax, nzmin integer , dimension(:), allocatable :: idx - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! >->->->->->->->->->->->->->-> z-level <-<-<-<-<-<-<-<-<-<-<-<-< @@ -817,8 +990,9 @@ subroutine update_thickness_ale(mesh) allocate(idx(lzstar_lev)) ! 
if lzstar_lev=4 --> idx = /1,2,3,4/ - idx = (/(nz,nz=1,lzstar_lev,1)/) - + idx = (/(nz, nz=1, lzstar_lev, 1)/) +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, elem, elnodes) +!$OMP DO !_______________________________________________________________________ do elem=1,myDim_elem2D elnodes=elem2D_nodes(:, elem) @@ -832,21 +1006,6 @@ subroutine update_thickness_ale(mesh) !___________________________________________________________________ ! actualize elemental layer thinkness in first lzstar_lev layers -!!PS if (any(hnode_new(2:lzstar_lev,elnodes(1))-hnode(2:lzstar_lev,elnodes(1))/=0.0_WP) .or. & -!!PS any(hnode_new(2:lzstar_lev,elnodes(2))-hnode(2:lzstar_lev,elnodes(2))/=0.0_WP) .or. & -!!PS any(hnode_new(2:lzstar_lev,elnodes(3))-hnode(2:lzstar_lev,elnodes(3))/=0.0_WP) & -!!PS ) then -!!PS ! --> case local zstar -!!PS ! try to limitate over how much layers i realy need to distribute -!!PS ! the change in ssh, so that the next loops run only over the -!!PS ! nesseccary levels and not over all lzstar_lev levels -!!PS nz = max(1 ,maxval(pack(idx,hnode_new(1:lzstar_lev,elnodes(1))-hnode(1:lzstar_lev,elnodes(1))/=0.0_WP))) -!!PS nz = max(nz,maxval(pack(idx,hnode_new(1:lzstar_lev,elnodes(2))-hnode(1:lzstar_lev,elnodes(2))/=0.0_WP))) -!!PS nz = max(nz,maxval(pack(idx,hnode_new(1:lzstar_lev,elnodes(3))-hnode(1:lzstar_lev,elnodes(3))/=0.0_WP))) -!!PS nzmax = min(nz,nlevels(elem)-2) -!!PS do nz=1,nzmax -!!PS helem(nz,elem)=sum(hnode_new(nz,elnodes))/3.0_WP -!!PS end do if (any(hnode_new(nzmin+1:nzmin+lzstar_lev-1,elnodes(1)) - hnode(nzmin+1:nzmin+lzstar_lev-1,elnodes(1))/=0.0_WP) .or. & any(hnode_new(nzmin+1:nzmin+lzstar_lev-1,elnodes(2)) - hnode(nzmin+1:nzmin+lzstar_lev-1,elnodes(2))/=0.0_WP) .or. 
& any(hnode_new(nzmin+1:nzmin+lzstar_lev-1,elnodes(3)) - hnode(nzmin+1:nzmin+lzstar_lev-1,elnodes(3))/=0.0_WP) & @@ -869,10 +1028,10 @@ subroutine update_thickness_ale(mesh) helem(nzmin,elem)=sum(hnode_new(nzmin,elnodes))/3.0_WP end if end do - +!$OMP END DO !_______________________________________________________________________ +!$OMP DO do n=1,myDim_nod2D+eDim_nod2D - !!PS nzmin = ulevels_nod2D(n) nzmin = ulevels_nod2D_max(n) nzmax = nlevels_nod2D_min(n)-1 @@ -883,24 +1042,6 @@ subroutine update_thickness_ale(mesh) !___________________________________________________________________ ! actualize layer thinkness in first lzstar_lev layers -!!PS if ( (any(hnode_new(2:lzstar_lev,n)-hnode(2:lzstar_lev,n)/=0.0_WP)) ) then -!!PS ! --> case local zstar -!!PS ! try to limitate over how much layers i realy need to distribute -!!PS ! the change in ssh, so that the next loops run only over the -!!PS ! nesseccary levels and not over all lzstar_lev levels -!!PS nz = max(1,maxval(pack(idx,hnode_new(1:lzstar_lev,n)-hnode(1:lzstar_lev,n)/=0.0_WP))) -!!PS -!!PS ! nlevels_nod2D_min(n)-1 ...would be hnode of partial bottom cell but this -!!PS ! one is not allowed to change so go until nlevels_nod2D_min(n)-2 -!!PS nzmax = min(nz,nlevels_nod2D_min(n)-2) -!!PS ! do not touch zbars_3d_n that are involved in the bottom cell !!!! -!!PS ! this ones are set up during initialisation and are not touched afterwards -!!PS ! --> nlevels_nod2D_min(n),nlevels_nod2D_min(n)-1 -!!PS do nz=nzmax,1,-1 -!!PS hnode(nz,n) = hnode_new(nz,n) -!!PS zbar_3d_n(nz,n) = zbar_3d_n(nz+1,n)+hnode_new(nz,n) -!!PS Z_3d_n(nz,n) = zbar_3d_n(nz+1,n)+hnode_new(nz,n)/2.0_WP -!!PS end do if ( (any(hnode_new(nzmin+1:nzmin+lzstar_lev-1,n)-hnode(nzmin+1:nzmin+lzstar_lev-1,n)/=0.0_WP)) ) then ! --> case local zstar ! try to limitate over how much layers i realy need to distribute @@ -923,17 +1064,14 @@ subroutine update_thickness_ale(mesh) !___________________________________________________________________ ! 
only actualize layer thinkness in first layer else -!!PS ! --> case normal zlevel -!!PS hnode(1,n) = hnode_new(1,n) -!!PS zbar_3d_n(1,n)= zbar_3d_n(2,n)+hnode_new(1,n) -!!PS Z_3d_n(1,n) = zbar_3d_n(2,n)+hnode_new(1,n)/2.0_WP ! --> case normal zlevel hnode(nzmin,n) = hnode_new(nzmin,n) zbar_3d_n(nzmin,n)= zbar_3d_n(nzmin+1,n)+hnode_new(nzmin,n) Z_3d_n(nzmin,n) = zbar_3d_n(nzmin+1,n)+hnode_new(nzmin,n)/2.0_WP end if end do - +!$OMP END DO +!$OMP END PARALLEL !_______________________________________________________________________ deallocate(idx) @@ -943,10 +1081,10 @@ subroutine update_thickness_ale(mesh) elseif (trim(which_ale)=='zstar' ) then ! --> update layer thinkness, depth layer and mid-depth layer at node +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) do n=1, myDim_nod2D+eDim_nod2D ! actualize 3d depth levels and mid-depth levels from bottom to top nzmin = ulevels_nod2D(n) -!!PS nzmin = ulevels_nod2D_max(n) nzmax = nlevels_nod2D_min(n)-2 !___________________________________________________________________ @@ -957,20 +1095,19 @@ subroutine update_thickness_ale(mesh) !___________________________________________________________________ ! do not touch zbars_3d_n that are involved in the bottom cell !!!! ! --> nlevels_nod2D_min(n),nlevels_nod2D_min(n)-1 - !!PS do nz=nzmax,1,-1 - do nz=nzmax,nzmin,-1 + do nz=nzmax, nzmin,-1 hnode(nz,n) = hnode_new(nz,n) zbar_3d_n(nz,n) = zbar_3d_n(nz+1,n) + hnode_new(nz,n) Z_3d_n(nz,n) = zbar_3d_n(nz+1,n) + hnode_new(nz,n)/2.0_WP end do end do - +!$OMP END PARALLEL DO !_______________________________________________________________________ ! --> update mean layer thinkness at element +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nz, nzmin, nzmax) do elem=1, myDim_elem2D nzmin = ulevels(elem) nzmax = nlevels(elem)-1 - !___________________________________________________________________ ! if there is a cavity layer thickness is not updated, its ! 
kept fixed @@ -978,30 +1115,38 @@ subroutine update_thickness_ale(mesh) !___________________________________________________________________ elnodes=elem2D_nodes(:, elem) - !!PS do nz=1,nlevels(elem)-2 - do nz=nzmin,nzmax-1 + do nz=nzmin, nzmax-1 helem(nz,elem)=sum(hnode(nz,elnodes))/3.0_WP end do end do +!$OMP END PARALLEL DO endif - end subroutine update_thickness_ale ! ! !=============================================================================== ! update thickness arrays based on the current hbar -subroutine restart_thickness_ale(mesh) +subroutine restart_thickness_ale(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS use g_config,only: which_ale,lzstar_lev,min_hnode implicit none + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ integer :: n, nz, elem, elnodes(3), nzmax, nzmin, lcl_lzstar_lev integer , dimension(:), allocatable :: idx - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + !___________________________________________________________________________ if(mype==0) then write(*,*) '____________________________________________________________' write(*,*) ' --> restart ALE layerthicknesses, depth levels and middepth levels' @@ -1090,14 +1235,16 @@ end subroutine restart_thickness_ale ! ! To achive it we should use global arrays n_num and n_pos. ! Reserved for future. 
-subroutine init_stiff_mat_ale(mesh) +subroutine init_stiff_mat_ale(partit, mesh) use o_PARAM use MOD_MESH - use g_PARSUP - use o_ARRAYS, only:zbar_e_bot, zbar_e_srf + USE MOD_PARTIT + USE MOD_PARSUP use g_CONFIG implicit none - + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + !___________________________________________________________________________ integer :: n, n1, n2, i, j, row, ed, fileID integer :: elnodes(3), el(2) integer :: npos(3), offset, nini, nend @@ -1108,9 +1255,14 @@ subroutine init_stiff_mat_ale(mesh) character(MAX_PATH) :: dist_mesh_dir, file_name real(kind=WP) :: t0, t1 integer :: ierror ! MPI, return error code - type(t_mesh), intent(inout) , target :: mesh -#include "associate_mesh.h" + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + !___________________________________________________________________________ t0=MPI_Wtime() if (mype==0) then write(*,*) '____________________________________________________________' @@ -1214,7 +1366,7 @@ subroutine init_stiff_mat_ale(mesh) if (el(i)<1) cycle ! if el(i)<1, it means its an outer boundary edge this ! has only one triangle element to which it contribute - + ! which three nodes span up triangle el(i) ! elnodes ... node indices elnodes=elem2D_nodes(:,el(i)) @@ -1266,8 +1418,12 @@ subroutine init_stiff_mat_ale(mesh) ! 2nd do first term of lhs od equation (18) of "FESOM2 from finite element to finite volumes" ! Mass matrix part do row=1, myDim_nod2D + ! if cavity no time derivative for eta in case of rigid lid approximation at + ! thee cavity-ocean interface, which means cavity-ocean interface is not allowed + ! to move vertically. 
+ if (ulevels_nod2D(row)>1) cycle offset = ssh_stiff%rowptr(row) - SSH_stiff%values(offset) = SSH_stiff%values(offset)+ area(1,row)/dt + SSH_stiff%values(offset) = SSH_stiff%values(offset)+ areasvol(ulevels_nod2D(row),row)/dt end do deallocate(n_pos,n_num) @@ -1369,36 +1525,40 @@ end subroutine init_stiff_mat_ale ! due to changes in ssh is done here ! = ssh_rhs in the update of the stiff matrix ! -subroutine update_stiff_mat_ale(mesh) +subroutine update_stiff_mat_ale(partit, mesh) use g_config,only: dt use o_PARAM use MOD_MESH - use O_MESH - use g_PARSUP + use MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS - ! implicit none - integer :: n, i, j, row, ed,n2 + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: n, i, j, k, row, ed, n2 integer :: enodes(2), elnodes(3), el(2) integer :: elem, npos(3), offset, nini, nend real(kind=WP) :: factor real(kind=WP) :: fx(3), fy(3) - integer, allocatable :: n_num(:) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! update secod term of lhs od equation (18) of "FESOM2 from finite element ! to finite volumes" --> stiff matrix part ! loop over lcal edges - allocate(n_num(myDim_nod2D+eDim_nod2D)) - n_num=0 factor=g*dt*alpha*theta + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, i, j, k, row, ed, n2, enodes, elnodes, el, elem, npos, offset, nini, nend, fx, fy) do ed=1,myDim_edge2D !! Attention ! enodes ... local node indices of nodes that edge ed - enodes=edges(:,ed) - + enodes=edges(:,ed) ! el ... 
local element indices of the two elments that contribute to edge ! el(1) or el(2) < 0 than edge is boundary edge el=edge_tri(:,ed) @@ -1410,27 +1570,26 @@ subroutine update_stiff_mat_ale(mesh) !___________________________________________________________________ ! sparse indice offset for node with index row - offset=SSH_stiff%rowptr(row)-ssh_stiff%rowptr(1) - ! loop over number of neghbouring nodes of node-row - do n=1,SSH_stiff%rowptr(row+1)-SSH_stiff%rowptr(row) - ! nn_pos ... local indice position of neigbouring nodes - ! n2 ... local indice of n-th neighbouring node to node-row - n2=nn_pos(n,row) - ! n_num(n2) ... global sparse matrix indices of local mesh point n2 - n_num(n2)=offset+n - end do - + offset=SSH_stiff%rowptr(row)-ssh_stiff%rowptr(1) !___________________________________________________________________ - do i=1,2 ! Two elements related to the edge + do i=1, 2 ! Two elements related to the edge ! It should be just grad on elements ! elem ... local element index to calc grad on that element - elem=el(i) - - if(elem<1) cycle - + elem=el(i) + if(elem<1) cycle ! elnodes ... local node indices of nodes that form element elem elnodes=elem2D_nodes(:,elem) - + ! we have to put it here for OMP compatibility. The MPI version might become a bit slower :( + ! loop over number of neghbouring nodes of node-row + do k=1, 3 + do n=1, SSH_stiff%rowptr(row+1)-SSH_stiff%rowptr(row) + ! npos ... global sparse matrix indices of local mesh points elnodes + if (elnodes(k)==nn_pos(n, row)) then + npos(k)=offset+n !start with the next k + EXIT + end if + end do + end do ! here update of second term on lhs of eq. 18 in Danilov etal 2017 ! --> in the initialisation of the stiff matrix the integration went ! over the unperturbed ocean depth using -zbar_e_bot @@ -1447,14 +1606,21 @@ subroutine update_stiff_mat_ale(mesh) ! In the computation above, I've used rules from ssh_rhs (where it is ! on the rhs. So the sign is changed in the expression below. ! npos... 
sparse matrix indices position of node points elnodes - npos=n_num(elnodes) - SSH_stiff%values(npos)=SSH_stiff%values(npos) + fy*factor - +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(row)) ! it shall be sufficient to block writing into the same row of SSH_stiff +#else +!$OMP ORDERED +#endif + SSH_stiff%values(npos)=SSH_stiff%values(npos) + fy*factor +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(row)) +#else +!$OMP END ORDERED +#endif end do ! --> do i=1,2 end do ! --> do j=1,2 end do ! --> do ed=1,myDim_edge2D - deallocate(n_num) - +!$OMP END PARALLEL DO !DS this check will work only on 0pe because SSH_stiff%rowptr contains global pointers !if (mype==0) then !do row=1, myDim_nod2D @@ -1476,28 +1642,50 @@ end subroutine update_stiff_mat_ale !"FESOM2: from finite elements to finite volumes" ! ! ssh_rhs = alpha * grad[ int_hbot^hbar(n+0.5)( u^n+deltau)dz + W(n+0.5) ] -subroutine compute_ssh_rhs_ale(mesh) +! In the semiimplicit method: +! ssh_rhs=-alpha*\nabla\int(U_n+U_rhs)dz-(1-alpha)*... +! see "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (11) rhs +subroutine compute_ssh_rhs_ale(dynamics, partit, mesh) use g_config,only: which_ALE,dt use MOD_MESH - use o_ARRAYS + use o_ARRAYS, only: water_flux use o_PARAM - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN use g_comm_auto implicit none - - ! In the semiimplicit method: - ! ssh_rhs=-alpha*\nabla\int(U_n+U_rhs)dz-(1-alpha)*... - ! see "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. 
(11) rhs + type(t_mesh) , intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + !___________________________________________________________________________ integer :: ed, el(2), enodes(2), nz, n, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 real(kind=WP) :: dumc1_1, dumc1_2, dumc2_1, dumc2_2 !!PS - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + ssh_rhs => dynamics%ssh_rhs(:) + ssh_rhs_old=> dynamics%ssh_rhs_old(:) - ssh_rhs=0.0_WP !___________________________________________________________________________ ! loop over local edges +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + ssh_rhs(n)=0.0_WP + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(ed, el, enodes, n, nz, nzmin, nzmax, c1, c2, deltaX1, deltaX2, deltaY1, deltaY2, & +!$OMP dumc1_1, dumc1_2, dumc2_1, dumc2_2) +!$OMP DO do ed=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,ed) @@ -1540,10 +1728,26 @@ subroutine compute_ssh_rhs_ale(mesh) !_______________________________________________________________________ ! 
calc netto "flux" +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED +#endif ssh_rhs(enodes(1))=ssh_rhs(enodes(1))+(c1+c2) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock(partit%plock(enodes(2))) +#endif ssh_rhs(enodes(2))=ssh_rhs(enodes(2))-(c1+c2) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED +#endif + end do - +!$OMP END DO + !___________________________________________________________________________ ! take into account water flux ! at this point: ssh_rhs = -alpha * nabla*int(u^n + deltau dz) @@ -1554,18 +1758,27 @@ subroutine compute_ssh_rhs_ale(mesh) ! ! shown in eq (11) rhs of "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. (11) rhs if ( .not. trim(which_ALE)=='linfs') then +!$OMP DO do n=1,myDim_nod2D nzmin = ulevels_nod2D(n) - ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*area(nzmin,n)+(1.0_WP-alpha)*ssh_rhs_old(n) - !!PS ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*area(1,n)+(1.0_WP-alpha)*ssh_rhs_old(n) + if (ulevels_nod2D(n)>1) then + ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*areasvol(nzmin,n) + else + ssh_rhs(n)=ssh_rhs(n)-alpha*water_flux(n)*areasvol(nzmin,n)+(1.0_WP-alpha)*ssh_rhs_old(n) + end if end do +!$OMP END DO else +!$OMP DO do n=1,myDim_nod2D + if (ulevels_nod2D(n)>1) cycle ssh_rhs(n)=ssh_rhs(n)+(1.0_WP-alpha)*ssh_rhs_old(n) end do +!$OMP END DO end if - call exchange_nod(ssh_rhs) - +!$OMP END PARALLEL + call exchange_nod(ssh_rhs, partit) +!$OMP BARRIER end subroutine compute_ssh_rhs_ale ! ! @@ -1579,30 +1792,51 @@ end subroutine compute_ssh_rhs_ale ! hbar(n+0.5) = hbar(n-0.5) - tau*ssh_rhs_old ! ! in S. Danilov et al.: "FESOM2: from finite elements to finite volumes" -subroutine compute_hbar_ale(mesh) +! +! see "FESOM2: from finite elements to finte volumes, S. Danilov..." +! 
hbar(n+1)-hbar(n)=tau*ssh_rhs_old +! ssh_rhs_old=-\nabla\int(U_n)dz-water_flux*area (if free surface) +! Find new elevation hbar +subroutine compute_hbar_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, use_cavity use MOD_MESH - use o_ARRAYS + use o_ARRAYS, only: water_flux use o_PARAM - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN use g_comm_auto - implicit none - - ! see "FESOM2: from finite elements to finte volumes, S. Danilov..." - ! hbar(n+1)-hbar(n)=tau*ssh_rhs_old - ! ssh_rhs_old=-\nabla\int(U_n)dz-water_flux*area (if free surface) - ! Find new elevation hbar - - integer :: ed, el(2), enodes(2), nz,n, elnodes(3), elem, nzmin, nzmax + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: ed, el(2), enodes(2), elem, elnodes(3), n, nz, nzmin, nzmax real(kind=WP) :: c1, c2, deltaX1, deltaX2, deltaY1, deltaY2 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + ssh_rhs => dynamics%ssh_rhs(:) + ssh_rhs_old=> dynamics%ssh_rhs_old(:) !___________________________________________________________________________ ! compute the rhs - ssh_rhs_old=0.0_WP + +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + ssh_rhs_old(n)=0.0_WP + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(ed, el, enodes, elem, elnodes, n, nz, nzmin, nzmax, & +!$OMP c1, c2, deltaX1, deltaX2, deltaY1, deltaY2) +!$OMP DO do ed=1, myDim_edge2D ! 
local indice of nodes that span up edge ed enodes=edges(:,ed) @@ -1638,46 +1872,64 @@ subroutine compute_hbar_ale(mesh) end do end if !_______________________________________________________________________ +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED +#endif ssh_rhs_old(enodes(1))=ssh_rhs_old(enodes(1))+(c1+c2) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock(partit%plock(enodes(2))) +#endif ssh_rhs_old(enodes(2))=ssh_rhs_old(enodes(2))-(c1+c2) - +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED +#endif end do - +!$OMP END DO +!$OMP END PARALLEL + !___________________________________________________________________________ ! take into account water flux -!!PS if (.not. trim(which_ALE)=='linfs') then -!!PS ssh_rhs_old(1:myDim_nod2D)=ssh_rhs_old(1:myDim_nod2D)-water_flux(1:myDim_nod2D)*area(1,1:myDim_nod2D) -!!PS call exchange_nod(ssh_rhs_old) -!!PS end if if (.not. trim(which_ALE)=='linfs') then +!$OMP PARALLEL DO do n=1,myDim_nod2D - ssh_rhs_old(n)=ssh_rhs_old(n)-water_flux(n)*area(ulevels_nod2D(n),n) + ssh_rhs_old(n)=ssh_rhs_old(n)-water_flux(n)*areasvol(ulevels_nod2D(n),n) end do - call exchange_nod(ssh_rhs_old) +!$OMP END PARALLEL DO + call exchange_nod(ssh_rhs_old, partit) +!$OMP BARRIER end if - !___________________________________________________________________________ ! 
update the thickness -!!PS hbar_old=hbar -!!PS hbar(1:myDim_nod2D)=hbar_old(1:myDim_nod2D)+ssh_rhs_old(1:myDim_nod2D)*dt/area(1,1:myDim_nod2D) -!!PS call exchange_nod(hbar) - hbar_old=hbar +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + hbar_old(n)=hbar(n) + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO do n=1,myDim_nod2D - hbar(n)=hbar_old(n)+ssh_rhs_old(n)*dt/area(ulevels_nod2D(n),n) + hbar(n)=hbar_old(n)+ssh_rhs_old(n)*dt/areasvol(ulevels_nod2D(n),n) end do - call exchange_nod(hbar) - +!$OMP END PARALLEL DO + call exchange_nod(hbar, partit) +!$OMP BARRIER !___________________________________________________________________________ ! fill the array for updating the stiffness matrix +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes) do elem=1,myDim_elem2D elnodes=elem2D_nodes(:,elem) - if (ulevels(elem)>1) then + if (ulevels(elem) > 1) then dhe(elem) = 0.0_WP else dhe(elem) = sum(hbar(elnodes)-hbar_old(elnodes))/3.0_WP endif end do - +!$OMP END PARALLEL DO end subroutine compute_hbar_ale ! ! @@ -1694,38 +1946,66 @@ end subroutine compute_hbar_ale ! > for zlevel: dh_k/dt_k=1 != 0 ! > for zstar : dh_k/dt_k=1...kbot-1 != 0 ! 
-subroutine vert_vel_ale(mesh) +subroutine vert_vel_ale(dynamics, partit, mesh) use g_config,only: dt, which_ALE, min_hnode, lzstar_lev, flag_warn_cflz use MOD_MESH - use O_MESH - use o_ARRAYS + use o_ARRAYS, only: water_flux use o_PARAM - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN use g_comm_auto use io_RESTART !!PS - use i_arrays !!PS use g_forcing_arrays !!PS implicit none - - integer :: el(2), enodes(2), n, nz, ed, nzmin, nzmax - real(kind=WP) :: c1, c2, deltaX1, deltaY1, deltaX2, deltaY2, dd, dd1, dddt, cflmax - - !_______________________________ + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: el(2), enodes(2), n, nz, ed, nzmin, nzmax, uln1, uln2, nln1, nln2 + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, dd, dd1, dddt, cflmax + ! still to be understood but if you allocate these arrays statically the results will be different: + real(kind=WP) :: c1(mesh%nl-1), c2(mesh%nl-1) ! --> zlevel with local zstar - real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int !PS - real(kind=WP), dimension(:), allocatable :: max_dhbar2distr,cumsum_maxdhbar,distrib_dhbar + real(kind=WP) :: dhbar_total, dhbar_rest, distrib_dhbar_int + real(kind=WP), dimension(:), allocatable :: max_dhbar2distr, cumsum_maxdhbar, distrib_dhbar integer , dimension(:), allocatable :: idx - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - !___________________________________________________________________________ - ! Contributions from levels in divergence - Wvel=0.0_WP + ! 
pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, CFL_z, fer_Wvel + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) + Wvel_e => dynamics%w_e(:,:) + Wvel_i => dynamics%w_i(:,:) + CFL_z => dynamics%cfl_z(:,:) + ssh_rhs => dynamics%ssh_rhs(:) + ssh_rhs_old => dynamics%ssh_rhs_old(:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) if (Fer_GM) then - fer_Wvel=0.0_WP - end if - + fer_UV => dynamics%fer_uv(:,:,:) + fer_Wvel=> dynamics%fer_w(:,:) + end if + !___________________________________________________________________________ + ! Contributions from levels in divergence +!$OMP PARALLEL DO + DO n=1, myDim_nod2D+eDim_nod2D + Wvel(:, n)=0.0_WP + if (Fer_GM) then + fer_Wvel(:, n)=0.0_WP + end if + END DO +!$OMP END PARALLEL DO + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(ed, enodes, el, deltaX1, deltaY1, nz, nzmin, nzmax, deltaX2, deltaY2, c1, c2) +!$OMP DO do ed=1, myDim_edge2D ! local indice of nodes that span up edge ed enodes=edges(:,ed) @@ -1743,25 +2023,40 @@ subroutine vert_vel_ale(mesh) ! do it with gauss-law: int( div(u_vec)*dV) = int( u_vec * n_vec * dS ) nzmin = ulevels(el(1)) nzmax = nlevels(el(1))-1 - !!PS do nz=nlevels(el(1))-1,1,-1 +! we introduced c1 & c2 as arrays here to avoid deadlocks when in OpenMP mode do nz = nzmax, nzmin, -1 ! --> h * u_vec * n_vec ! --> e_vec = (dx,dy), n_vec = (-dy,dx); ! --> h * u*(-dy) + v*dx - c1=( UV(2,nz,el(1))*deltaX1 - UV(1,nz,el(1))*deltaY1 )*helem(nz,el(1)) + c1(nz)=( UV(2,nz,el(1))*deltaX1 - UV(1,nz,el(1))*deltaY1 )*helem(nz,el(1)) ! inflow(outflow) "flux" to control volume of node enodes1 - Wvel(nz,enodes(1))=Wvel(nz,enodes(1))+c1 ! 
is equal to outflow(inflow) "flux" to control volume of node enodes2 - Wvel(nz,enodes(2))=Wvel(nz,enodes(2))-c1 if (Fer_GM) then - c1=(fer_UV(2,nz,el(1))*deltaX1- & - fer_UV(1,nz,el(1))*deltaY1)*helem(nz,el(1)) - fer_Wvel(nz,enodes(1))=fer_Wvel(nz,enodes(1))+c1 - fer_Wvel(nz,enodes(2))=fer_Wvel(nz,enodes(2))-c1 - end if - + c2(nz)=(fer_UV(2,nz,el(1))*deltaX1- fer_UV(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + end if end do - +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(enodes(1))) +#else +!$OMP ORDERED +#endif + Wvel (nzmin:nzmax, enodes(1))= Wvel (nzmin:nzmax, enodes(1))+c1(nzmin:nzmax) + if (Fer_GM) then + fer_Wvel(nzmin:nzmax, enodes(1))= fer_Wvel(nzmin:nzmax, enodes(1))+c2(nzmin:nzmax) + end if +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) +#endif + Wvel (nzmin:nzmax, enodes(2))= Wvel (nzmin:nzmax, enodes(2))-c1(nzmin:nzmax) + if (Fer_GM) then + fer_Wvel(nzmin:nzmax, enodes(2))= fer_Wvel(nzmin:nzmax, enodes(2))-c2(nzmin:nzmax) + end if +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED +#endif !_______________________________________________________________________ ! if ed is not a boundary edge --> calc div(u_vec*h) for every layer ! 
for el(2) @@ -1769,33 +2064,52 @@ subroutine vert_vel_ale(mesh) deltaX2=edge_cross_dxdy(3,ed) deltaY2=edge_cross_dxdy(4,ed) nzmin = ulevels(el(2)) - nzmax = nlevels(el(2))-1 - !!PS do nz=nlevels(el(2))-1,1,-1 + nzmax = nlevels(el(2))-1 do nz = nzmax, nzmin, -1 - c2=-(UV(2,nz,el(2))*deltaX2 - UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - Wvel(nz,enodes(1))=Wvel(nz,enodes(1))+c2 - Wvel(nz,enodes(2))=Wvel(nz,enodes(2))-c2 + c1(nz)=-(UV(2,nz,el(2))*deltaX2 - UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) if (Fer_GM) then - c2=-(fer_UV(2,nz,el(2))*deltaX2- & - fer_UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) - fer_Wvel(nz,enodes(1))=fer_Wvel(nz,enodes(1))+c2 - fer_Wvel(nz,enodes(2))=fer_Wvel(nz,enodes(2))-c2 - end if + c2(nz)=-(fer_UV(2,nz,el(2))*deltaX2-fer_UV(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + end if end do +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(enodes(1))) +#else +!$OMP ORDERED +#endif + Wvel (nzmin:nzmax, enodes(1))= Wvel (nzmin:nzmax, enodes(1))+c1(nzmin:nzmax) + if (Fer_GM) then + fer_Wvel(nzmin:nzmax, enodes(1))= fer_Wvel(nzmin:nzmax, enodes(1))+c2(nzmin:nzmax) + end if +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) +#endif + Wvel (nzmin:nzmax, enodes(2))= Wvel (nzmin:nzmax, enodes(2))-c1(nzmin:nzmax) + if (Fer_GM) then + fer_Wvel(nzmin:nzmax, enodes(2))= fer_Wvel(nzmin:nzmax, enodes(2))-c2(nzmin:nzmax) + end if +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED +#endif end if end do ! --> do ed=1, myDim_edge2D +!$OMP END DO +!$OMP END PARALLEL ! | ! | ! +--> until here Wvel contains the thickness divergence div(u) - !___________________________________________________________________________ ! cumulative summation of div(u_vec*h) vertically ! W_k = W_k+1 - div(h_k*u_k) ! W_k ... 
vertical flux trough + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2d(n)-1 - !!PS do nz=nl-1,1,-1 + do nz=nzmax,nzmin,-1 Wvel(nz,n)=Wvel(nz,n)+Wvel(nz+1,n) if (Fer_GM) then @@ -1803,21 +2117,24 @@ subroutine vert_vel_ale(mesh) end if end do end do - +!$OMP END PARALLEL DO !___________________________________________________________________________ ! divide with depth dependent cell area to convert from Vertical flux to ! physical vertical velocities in units m/s +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2d(n)-1 - !!PS do nz=1,nlevels_nod2D(n)-1 + do nz=nzmin,nzmax Wvel(nz,n)=Wvel(nz,n)/area(nz,n) if (Fer_GM) then - fer_Wvel(nz,n)=fer_Wvel(nz,n)/area(nz,n) + fer_Wvel(nz,n)=fer_Wvel(nz,n)/area(nz,n) end if + end do end do +!$OMP END PARALLEL DO ! | ! |--> (A) linear free surface: dh/dt=0 ; W_t-W_b = -div(hu) ! | @@ -1842,11 +2159,14 @@ subroutine vert_vel_ale(mesh) !_______________________________________________________________________ ! idx is only needed for local star case to estimate over how much ! depth layers change in ssh needs to be distributed - allocate(max_dhbar2distr(lzstar_lev),distrib_dhbar(lzstar_lev),idx(lzstar_lev),cumsum_maxdhbar(lzstar_lev)) - idx = (/(nz,nz=1,lzstar_lev,1)/) !!PS allocate(max_dhbar2distr(nl-1),distrib_dhbar(nl-1),idx(nl-1),cumsum_maxdhbar(nl-1)) !!PS idx = (/(nz,nz=1,nl-1,1)/) - + allocate(max_dhbar2distr(lzstar_lev), distrib_dhbar(lzstar_lev), idx(lzstar_lev), cumsum_maxdhbar(lzstar_lev)) + idx = (/(nz, nz=1, lzstar_lev, 1)/) + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, dhbar_total, max_dhbar2distr, cumsum_maxdhbar, & +!$OMP distrib_dhbar, dhbar_rest, distrib_dhbar_int) +!$OMP DO do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D_min(n)-1 @@ -1867,7 +2187,7 @@ subroutine vert_vel_ale(mesh) ! 
layerthickness becomes to small or even negativ and model ! blows up !!PS if (dhbar_total<0.0_WP .and. hnode(1,n)+dhbar_total<=(zbar(1)-zbar(2))*min_hnode ) then - if (dhbar_total<0.0_WP .and. hnode(nzmin,n)+dhbar_total<=(zbar(nzmin)-zbar(nzmin+1))*min_hnode ) then + if (dhbar_total < 0.0_WP .and. hnode(nzmin,n)+dhbar_total<=(zbar(nzmin)-zbar(nzmin+1))*min_hnode ) then ! --> do local zstar case !_______________________________________________________________ ! max_dhbar2distr ... how much negative ssh change can be maximal @@ -2025,13 +2345,15 @@ subroutine vert_vel_ale(mesh) Wvel(nzmin,n) = Wvel(nzmin,n)-water_flux(n) end do ! --> do n=1, myDim_nod2D - +!$OMP END DO +!$OMP END PARALLEL !_______________________________________________________________________ deallocate(max_dhbar2distr,distrib_dhbar,idx,cumsum_maxdhbar) !___________________________________________________________________________ elseif (trim(which_ALE)=='zstar') then ! distribute total change in ssh (hbar(n)-hbar_old(n)) over all layers +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, dd, dd1, dddt) do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) !!PS nzmin = ulevels_nod2D_max(n) @@ -2064,7 +2386,7 @@ subroutine vert_vel_ale(mesh) !___________________________________________________________________ !!PS do nz=1,nlevels_nod2D_min(n)-2 - do nz=nzmin,nzmax-1 + do nz=nzmin, nzmax-1 ! why *(zbar(nz)-dd1) ??? ! because here Wvel_k = SUM_k:kmax(div(h_k*v_k))/V_k ! but Wvel_k = Wvel_k+1 - div(h_k*v_k) - h⁰_k/H*dhbar/dt @@ -2089,17 +2411,17 @@ subroutine vert_vel_ale(mesh) Wvel(nzmin,n)=Wvel(nzmin,n)-water_flux(n) end do ! --> do n=1, myDim_nod2D +!$OMP END PARALLEL DO ! The implementation here is a bit strange, but this is to avoid ! unnecessary multiplications and divisions by area. We use the fact ! that we apply stretching only over the part of the column ! where area(nz,n)=area(1,n) - endif ! 
--> if(trim(which_ALE)=='....') then - - if (any(hnode_new<0.0_WP)) then - write(*,*) ' --> fatal problem <--: layerthickness of a layer became smaller zero' + +!$OMP PARALLEL DO do n=1, myDim_nod2D+eDim_nod2D - if (any( hnode_new(:,n)<0.0_WP)) then + if (any( hnode_new(:,n) < 0.0_WP)) then + write(*,*) ' --> fatal problem <--: layerthickness of a layer became smaller than zero' write(*,*) " mype = ", mype write(*,*) " mstep = ", mstep write(*,*) " node = ", n @@ -2133,37 +2455,50 @@ subroutine vert_vel_ale(mesh) write(*,*) end if end do -!!PS call par_ex(1) - endif - +!$OMP END PARALLEL DO !___________________________________________________________________________ - call exchange_nod(Wvel) - call exchange_nod(hnode_new) ! Or extend cycles above - if (Fer_GM) call exchange_nod(fer_Wvel) - + call exchange_nod(Wvel, partit) + call exchange_nod(hnode_new, partit) ! Or extend cycles above + if (Fer_GM) call exchange_nod(fer_Wvel, partit) +!$OMP BARRIER !___________________________________________________________________________ ! calc vertical CFL criteria for debugging purpose and vertical Wvel splitting - CFL_z(1,:)=0._WP +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + CFL_z(1,n)=0._WP + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) do n=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n)-1 do nz=nzmin,nzmax - c1=abs(Wvel(nz,n) *dt/hnode_new(nz,n)) - c2=abs(Wvel(nz+1,n)*dt/hnode_new(nz,n)) + c1(1)=abs(Wvel(nz,n) *dt/hnode_new(nz,n)) !c1->c1(1) is made for the sake of reproducibility with the master branch (rounding error) + c2(1)=abs(Wvel(nz+1,n)*dt/hnode_new(nz,n)) !otherwise just add these terms (c(1) & c(2)) to CFL_z, respectively! ! strong condition: ! total volume change induced by the vertical motion ! no matter, upwind or downwind ! 
- CFL_z(nz, n)=CFL_z(nz,n)+c1 - CFL_z(nz+1,n)=c2 + CFL_z(nz, n)=CFL_z(nz,n)+c1(1) + CFL_z(nz+1,n)= c2(1) end do end do - cflmax=maxval(CFL_z(:, 1:myDim_nod2D)) !local CFL maximum is different on each mype - if (cflmax>1.0_WP .and. flag_warn_cflz) then +!$OMP END PARALLEL DO +cflmax=0. +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) REDUCTION(max:cflmax) +!$OMP DO + do n=1, myDim_nod2D+eDim_nod2D + cflmax=max(cflmax, maxval(CFL_z(:, n))) + end do +!$OMP END DO +!$OMP END PARALLEL + + if (cflmax > 1.0_WP .and. flag_warn_cflz) then +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax) do n=1, myDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n)-1 do nz=nzmin,nzmax - !!PS if (abs(CFL_z(nz,n)-cflmax) < 1.e-12) then if (abs(CFL_z(nz,n)-cflmax) < 1.e-12 .and. CFL_z(nz,n) > 1.75_WP .and. CFL_z(nz,n)<=2.5_WP ) then print '(A, A, F4.2, A, I6, A, F7.2,A,F6.2, A, I3,I3)', achar(27)//'[33m'//' --> WARNING CFLz>1.75:'//achar(27)//'[0m',& 'CFLz_max=',cflmax,',mstep=',mstep,',glon/glat=',geo_coord_nod2D(1,n)/rad,'/',geo_coord_nod2D(2,n)/rad,& @@ -2182,365 +2517,355 @@ subroutine vert_vel_ale(mesh) end if end do end do +!$OMP END PARALLEL DO end if !___________________________________________________________________________ ! Split implicit vertical velocity onto implicit and explicit components using CFL criteria: - ! w_max_cfl constrains the allowed explicit w according to the CFL at this place - ! w_max_cfl=1 means w_exp is cut at at the maximum of allowed CFL - ! w_max_cfl=0 means w_exp is zero (everything computed implicitly) - ! w_max_cfl=inf menas w_impl is zero (everything computed explicitly) - ! a guess for optimal choice of w_max_cfl would be 0.95 + ! wsplit_maxcfl constrains the allowed explicit w according to the CFL at this place + ! wsplit_maxcfl=1 means w_exp is cut at at the maximum of allowed CFL + ! wsplit_maxcfl=0 means w_exp is zero (everything computed implicitly) + ! wsplit_maxcfl=inf menas w_impl is zero (everything computed explicitly) + ! 
a guess for optimal choice of wsplit_maxcfl would be 0.95 +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, dd) do n=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) do nz=nzmin,nzmax - c1=1.0_WP - c2=0.0_WP - if (w_split .and. (CFL_z(nz, n) > w_max_cfl)) then - dd=max((CFL_z(nz, n)-w_max_cfl), 0.0_WP)/max(w_max_cfl, 1.e-12) - c1=1.0_WP/(1.0_WP+dd) !explicit part =1. if dd=0. - c2=dd /(1.0_WP+dd) !implicit part =1. if dd=inf + Wvel_e(nz,n)=Wvel(nz,n) + Wvel_i(nz,n)=0.0_WP + if (dynamics%use_wsplit .and. (CFL_z(nz, n) > dynamics%wsplit_maxcfl)) then + dd=max((CFL_z(nz, n)-dynamics%wsplit_maxcfl), 0.0_WP)/max(dynamics%wsplit_maxcfl, 1.e-12) + Wvel_e(nz,n)=(1.0_WP/(1.0_WP+dd))*Wvel(nz,n) !explicit part =1. if dd=0. + Wvel_i(nz,n)=(dd /(1.0_WP+dd))*Wvel(nz,n) !implicit part =1. if dd=inf end if - Wvel_e(nz,n)=c1*Wvel(nz,n) - Wvel_i(nz,n)=c2*Wvel(nz,n) end do end do +!$OMP END PARALLEL DO end subroutine vert_vel_ale + ! ! !=============================================================================== ! solve eq.18 in S. Danilov et al. : FESOM2: from finite elements to finite volumes. ! for (eta^(n+1)-eta^n) = d_eta -subroutine solve_ssh_ale(mesh) -use o_PARAM -use MOD_MESH -use o_ARRAYS -use g_PARSUP -use g_comm_auto -use g_config, only: which_ale - ! - ! - !___USE PETSC SOLVER________________________________________________________ - ! this is not longer used but is still kept in the code -#ifdef PETSC -implicit none -#include "petscf.h" -integer :: myrows -integer :: Pmode -real(kind=WP) :: rinfo(20,20) -integer :: maxiter=2000 -integer :: restarts=15 -integer :: fillin=3 -integer :: lutype=2 -integer :: nrhs=1 -real(kind=WP) :: droptol=1.e-7 -real(kind=WP) :: soltol =1e-10 !1.e-10 -logical, save :: lfirst=.true. 
-real(kind=WP), allocatable :: arr_nod2D(:),arr_nod2D2(:,:),arr_nod2D3(:) -real(kind=WP) :: cssh1,cssh2,crhs -integer :: i -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - -Pmode = PET_BLOCKP+PET_SOLVE + PET_BICGSTAB +PET_REPORT + PET_QUIET+ PET_RCM+PET_PCBJ -if (lfirst) then - Pmode = Pmode+PET_STRUCT+PET_PMVALS + PET_PCASM+PET_OVL_2 !+PET_PCBJ+PET_ILU - lfirst=.false. -end if -call PETSC_S(Pmode, 1, ssh_stiff%dim, ssh_stiff%nza, myrows, & - maxiter, & - restarts, & - fillin, & - droptol, & - soltol, & - part, ssh_stiff%rowptr, ssh_stiff%colind, ssh_stiff%values, & - ssh_rhs, d_eta, & - rinfo, MPI_COMM_FESOM, mesh) - ! - ! - !___USE PARMS SOLVER (recommended)__________________________________________ -#elif defined(PARMS) - - use iso_c_binding, only: C_INT, C_DOUBLE - implicit none +subroutine solve_ssh_ale(dynamics, partit, mesh) + use o_PARAM + use MOD_MESH + use o_ARRAYS + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + use g_comm_auto + use g_config, only: which_ale + use iso_c_binding, only: C_INT, C_DOUBLE + use ssh_solve_preconditioner_interface + use ssh_solve_cg_interface + implicit none #include "fparms.h" -logical, save :: lfirst=.true. 
-integer(kind=C_INT) :: ident -integer(kind=C_INT) :: n3, reuse, new_values -integer(kind=C_INT) :: maxiter, restart, lutype, fillin -real(kind=C_DOUBLE) :: droptol, soltol -integer :: n -type(t_mesh), intent(in) , target :: mesh - -interface - subroutine psolver_init(ident, SOL, PCGLOB, PCLOC, lutype, & - fillin, droptol, maxiter, restart, soltol, & - part, rowptr, colind, values, reuse, MPI_COMM) bind(C) - use iso_c_binding, only: C_INT, C_DOUBLE - integer(kind=C_INT) :: ident, SOL, PCGLOB, PCLOC, lutype, & - fillin, maxiter, restart, & - part(*), rowptr(*), colind(*), reuse, MPI_COMM - real(kind=C_DOUBLE) :: droptol, soltol, values(*) - end subroutine psolver_init -end interface -interface - subroutine psolve(ident, ssh_rhs, values, d_eta, newvalues) bind(C) - - use iso_c_binding, only: C_INT, C_DOUBLE - integer(kind=C_INT) :: ident, newvalues - real(kind=C_DOUBLE) :: values(*), ssh_rhs(*), d_eta(*) - - end subroutine psolve -end interface - -#include "associate_mesh.h" - -ident=1 -maxiter=2000 -restart=15 -fillin=3 -lutype=2 -droptol=1.e-8 -soltol=1.e-10 - -if (trim(which_ale)=='linfs') then - reuse=0 - new_values=0 -else - reuse=1 ! For varying coefficients, set reuse=1 - new_values=1 !PS 1 ! and new_values=1, as soon as the coefficients have changed -end if - -! reuse=0: matrix remains static -! reuse=1: keeps a copy of the matrix structure to apply scaling of the matrix fast - -! new_values=0: matrix coefficients unchanged (compared to the last call of psolve) -! new_values=1: replaces the matrix values (keeps the structure and the preconditioner) -! new_values=2: replaces the matrix values and recomputes the preconditioner (keeps the structure) - -! new_values>0 requires reuse=1 in psolver_init! - -if (lfirst) then - ! Set SOLCG for CG solver (symmetric, positiv definit matrices only, no precond available!!) - ! SOLBICGS for BiCGstab solver (arbitrary matrices) - ! SOLBICGS_RAS for BiCGstab solver (arbitrary matrices) with integrated RAS - the global - ! 
preconditioner setting is ignored! It saves a 4 vector copies per iteration - ! compared to SOLBICGS + PCRAS. - ! SOLPBICGS for pipelined BiCGstab solver (arbitrary matrices) - ! Should scale better than SOLBICGS, but be careful, it is still experimental. - ! SOLPBICGS_RAS is SOLPBICGS with integrated RAS (global preconditioner setting is ignored!) - ! for even better scalability, well, in the end, it does not matter much. - call psolver_init(ident, SOLBICGS_RAS, PCRAS, PCILUK, lutype, & - fillin, droptol, maxiter, restart, soltol, & - part-1, ssh_stiff%rowptr(:)-ssh_stiff%rowptr(1), & - ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) - lfirst=.false. -end if - call psolve(ident, ssh_rhs, ssh_stiff%values, d_eta, new_values) + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ + logical, save :: lfirst=.true. + integer(kind=C_INT) :: n3, reuse, new_values + integer :: n + + !___________________________________________________________________________ + ! interface for solver + interface + subroutine psolver_init(ident, SOL, PCGLOB, PCLOC, lutype, & + fillin, droptol, maxiter, restart, soltol, & + part, rowptr, colind, values, reuse, MPI_COMM) bind(C) + use iso_c_binding, only: C_INT, C_DOUBLE + integer(kind=C_INT) :: ident, SOL, PCGLOB, PCLOC, lutype, & + fillin, maxiter, restart, & + part(*), rowptr(*), colind(*), reuse, MPI_COMM + real(kind=C_DOUBLE) :: droptol, soltol, values(*) + end subroutine psolver_init + end interface + interface + subroutine psolve(ident, ssh_rhs, values, d_eta, newvalues) bind(C) + use iso_c_binding, only: C_INT, C_DOUBLE + integer(kind=C_INT) :: ident, newvalues + real(kind=C_DOUBLE) :: values(*), ssh_rhs(*), d_eta(*) + end subroutine psolve + end interface + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=C_DOUBLE), pointer :: droptol, soltol + integer(kind=C_INT), pointer :: maxiter, restart, lutype, fillin, ident +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ident => dynamics%solverinfo%ident + maxiter => dynamics%solverinfo%maxiter + restart => dynamics%solverinfo%restart + lutype => dynamics%solverinfo%lutype + fillin => dynamics%solverinfo%fillin + droptol => dynamics%solverinfo%droptol + soltol => dynamics%solverinfo%soltol + + if (.not. dynamics%solverinfo%use_parms) then + if (lfirst) call ssh_solve_preconditioner(dynamics%solverinfo, partit, mesh) + call ssh_solve_cg(dynamics%d_eta, dynamics%ssh_rhs, dynamics%solverinfo, partit, mesh) + lfirst=.false. + return + end if + + !___________________________________________________________________________ + if (trim(which_ale)=='linfs') then + reuse=0 + new_values=0 + else + reuse=1 ! For varying coefficients, set reuse=1 + new_values=1 !PS 1 ! and new_values=1, as soon as the coefficients have changed + end if + + ! reuse=0: matrix remains static + ! reuse=1: keeps a copy of the matrix structure to apply scaling of the matrix fast + + ! new_values=0: matrix coefficients unchanged (compared to the last call of psolve) + ! new_values=1: replaces the matrix values (keeps the structure and the preconditioner) + ! new_values=2: replaces the matrix values and recomputes the preconditioner (keeps the structure) + + ! new_values>0 requires reuse=1 in psolver_init! -#endif ! + !___________________________________________________________________________ + if (lfirst) then + ! Set SOLCG for CG solver (symmetric, positiv definit matrices only, no precond available!!) + ! SOLBICGS for BiCGstab solver (arbitrary matrices) + ! SOLBICGS_RAS for BiCGstab solver (arbitrary matrices) with integrated RAS - the global + ! preconditioner setting is ignored! It saves a 4 vector copies per iteration + ! 
compared to SOLBICGS + PCRAS. + ! SOLPBICGS for pipelined BiCGstab solver (arbitrary matrices) + ! Should scale better than SOLBICGS, but be careful, it is still experimental. + ! SOLPBICGS_RAS is SOLPBICGS with integrated RAS (global preconditioner setting is ignored!) + ! for even better scalability, well, in the end, it does not matter much. + call psolver_init(ident, SOLBICGS_RAS, PCRAS, PCILUK, lutype, & + fillin, droptol, maxiter, restart, soltol, & + part-1, ssh_stiff%rowptr(:)-ssh_stiff%rowptr(1), & + ssh_stiff%colind-1, ssh_stiff%values, reuse, MPI_COMM_FESOM) + lfirst=.false. + end if + ! + !___________________________________________________________________________ + call psolve(ident, dynamics%ssh_rhs, ssh_stiff%values, dynamics%d_eta, new_values) + ! !___________________________________________________________________________ -call exchange_nod(d_eta) !is this required after calling psolve ? + call exchange_nod(dynamics%d_eta, partit) !is this required after calling psolve ? end subroutine solve_ssh_ale ! ! !=============================================================================== -subroutine impl_vert_visc_ale(mesh) -USE MOD_MESH -USE o_PARAM -USE o_ARRAYS -USE g_PARSUP -USE g_CONFIG,only: dt -IMPLICIT NONE - -type(t_mesh), intent(in) , target :: mesh -real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) -real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) -integer :: nz, elem, nzmax, nzmin, elnodes(3) -real(kind=WP) :: zinv, m, friction, wu, wd - -#include "associate_mesh.h" - -DO elem=1,myDim_elem2D - elnodes=elem2D_nodes(:,elem) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - - !___________________________________________________________________________ - ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because - ! they run over elements here - zbar_n=0.0_WP - Z_n =0.0_WP - ! in case of partial cells zbar_n(nzmax) is not any more at zbar(nzmax), - ! 
zbar_n(nzmax) is now zbar_e_bot(elem), - zbar_n(nzmax)=zbar_e_bot(elem) - Z_n(nzmax-1)=zbar_n(nzmax) + helem(nzmax-1,elem)/2.0_WP - !!PS do nz=nzmax-1,2,-1 - do nz=nzmax-1,nzmin+1,-1 - zbar_n(nz) = zbar_n(nz+1) + helem(nz,elem) - Z_n(nz-1) = zbar_n(nz) + helem(nz-1,elem)/2.0_WP - end do - !!PS zbar_n(1) = zbar_n(2) + helem(1,elem) - zbar_n(nzmin) = zbar_n(nzmin+1) + helem(nzmin,elem) - +subroutine impl_vert_visc_ale(dynamics, partit, mesh) + USE MOD_MESH + USE o_PARAM + USE o_ARRAYS, only: Av, stress_surf + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + USE g_CONFIG,only: dt + IMPLICIT NONE + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ + real(kind=WP) :: a(mesh%nl-1), b(mesh%nl-1), c(mesh%nl-1), ur(mesh%nl-1), vr(mesh%nl-1) + real(kind=WP) :: cp(mesh%nl-1), up(mesh%nl-1), vp(mesh%nl-1) + integer :: nz, elem, nzmin, nzmax, elnodes(3) + real(kind=WP) :: zinv, m, friction, wu, wd + real(kind=WP) :: zbar_n(mesh%nl), Z_n(mesh%nl-1) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: Wvel_i +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV =>dynamics%uv(:,:,:) + UV_rhs =>dynamics%uv_rhs(:,:,:) + Wvel_i =>dynamics%w_i(:,:) + !___________________________________________________________________________ - ! Operator - ! 
Regular part of coefficients: - !!PS do nz=2, nzmax-2 - do nz=nzmin+1, nzmax-2 - zinv=1.0_WP*dt/(zbar_n(nz)-zbar_n(nz+1)) - a(nz)=-Av(nz,elem)/(Z_n(nz-1)-Z_n(nz))*zinv - c(nz)=-Av(nz+1,elem)/(Z_n(nz)-Z_n(nz+1))*zinv - b(nz)=-a(nz)-c(nz)+1.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(a, b, c, ur, vr, cp, up, vp, elem, nz, nzmin, nzmax, elnodes, & +!$OMP zinv, m, friction, wu, wd, zbar_n, Z_n) + +!$OMP DO + DO elem=1,myDim_elem2D + elnodes=elem2D_nodes(:,elem) + nzmin = ulevels(elem) + nzmax = nlevels(elem) + + !___________________________________________________________________________ + ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because + ! they run over elements here + zbar_n=0.0_WP + Z_n =0.0_WP + ! in case of partial cells zbar_n(nzmax) is not any more at zbar(nzmax), + ! zbar_n(nzmax) is now zbar_e_bot(elem), + zbar_n(nzmax)=zbar_e_bot(elem) + Z_n(nzmax-1)=zbar_n(nzmax) + helem(nzmax-1,elem)/2.0_WP + !!PS do nz=nzmax-1,2,-1 + do nz=nzmax-1,nzmin+1,-1 + zbar_n(nz) = zbar_n(nz+1) + helem(nz,elem) + Z_n(nz-1) = zbar_n(nz) + helem(nz-1,elem)/2.0_WP + end do + !!PS zbar_n(1) = zbar_n(2) + helem(1,elem) + zbar_n(nzmin) = zbar_n(nzmin+1) + helem(nzmin,elem) + + !___________________________________________________________________________ + ! Operator + ! Regular part of coefficients: + !!PS do nz=2, nzmax-2 + do nz=nzmin+1, nzmax-2 + zinv=1.0_WP*dt/(zbar_n(nz)-zbar_n(nz+1)) + a(nz)=-Av(nz,elem)/(Z_n(nz-1)-Z_n(nz))*zinv + c(nz)=-Av(nz+1,elem)/(Z_n(nz)-Z_n(nz+1))*zinv + b(nz)=-a(nz)-c(nz)+1.0_WP + ! Update from the vertical advection + wu=sum(Wvel_i(nz, elnodes))/3._WP + wd=sum(Wvel_i(nz+1, elnodes))/3._WP + a(nz)=a(nz)+min(0._WP, wu)*zinv + b(nz)=b(nz)+max(0._WP, wu)*zinv + + b(nz)=b(nz)-min(0._WP, wd)*zinv + c(nz)=c(nz)-max(0._WP, wd)*zinv + end do + ! The last row + zinv=1.0_WP*dt/(zbar_n(nzmax-1)-zbar_n(nzmax)) + a(nzmax-1)=-Av(nzmax-1,elem)/(Z_n(nzmax-2)-Z_n(nzmax-1))*zinv + b(nzmax-1)=-a(nzmax-1)+1.0_WP + c(nzmax-1)=0.0_WP + ! 
Update from the vertical advection - wu=sum(Wvel_i(nz, elnodes))/3._WP - wd=sum(Wvel_i(nz+1, elnodes))/3._WP - a(nz)=a(nz)+min(0._WP, wu)*zinv - b(nz)=b(nz)+max(0._WP, wu)*zinv + wu=sum(Wvel_i(nzmax-1, elnodes))/3._WP + a(nzmax-1)=a(nzmax-1)+min(0._WP, wu)*zinv + b(nzmax-1)=b(nzmax-1)+max(0._WP, wu)*zinv - b(nz)=b(nz)-min(0._WP, wd)*zinv - c(nz)=c(nz)-max(0._WP, wd)*zinv - if (a(nz)/=a(nz) .or. b(nz)/=b(nz) .or. c(nz)/=c(nz)) then - write(*,*) ' --> found a,b,c is NaN' - write(*,*) 'mype=',mype - write(*,*) 'nz=',nz - write(*,*) 'a(nz), b(nz), c(nz)=',a(nz), b(nz), c(nz) - write(*,*) 'Av(nz,elem)=',Av(nz,elem) - write(*,*) 'Av(nz+1,elem)=',Av(nz+1,elem) - write(*,*) 'Z_n(nz-1:nz+1)=',Z_n(nz-1:nz+1) - write(*,*) 'zbar_n(nz:nz+1)=',zbar_n(nz:nz+1) - endif - end do - ! The last row - zinv=1.0_WP*dt/(zbar_n(nzmax-1)-zbar_n(nzmax)) - a(nzmax-1)=-Av(nzmax-1,elem)/(Z_n(nzmax-2)-Z_n(nzmax-1))*zinv - b(nzmax-1)=-a(nzmax-1)+1.0_WP - c(nzmax-1)=0.0_WP - - ! Update from the vertical advection - wu=sum(Wvel_i(nzmax-1, elnodes))/3._WP - a(nzmax-1)=a(nzmax-1)+min(0._WP, wu)*zinv - b(nzmax-1)=b(nzmax-1)+max(0._WP, wu)*zinv - - ! The first row - !!PS zinv=1.0_WP*dt/(zbar_n(1)-zbar_n(2)) - !!PS c(1)=-Av(2,elem)/(Z_n(1)-Z_n(2))*zinv - !!PS a(1)=0.0_WP - !!PS b(1)=-c(1)+1.0_WP - zinv=1.0_WP*dt/(zbar_n(nzmin)-zbar_n(nzmin+1)) - c(nzmin)=-Av(nzmin+1,elem)/(Z_n(nzmin)-Z_n(nzmin+1))*zinv - a(nzmin)=0.0_WP - b(nzmin)=-c(nzmin)+1.0_WP - - ! Update from the vertical advection - !!PS wu=sum(Wvel_i(1, elnodes))/3._WP - !!PS wd=sum(Wvel_i(2, elnodes))/3._WP - wu=sum(Wvel_i(nzmin, elnodes))/3._WP - wd=sum(Wvel_i(nzmin+1, elnodes))/3._WP - - !!PS b(1)=b(1)+wu*zinv - !!PS b(1)=b(1)-min(0._WP, wd)*zinv - !!PS c(1)=c(1)-max(0._WP, wd)*zinv - b(nzmin)=b(nzmin)+wu*zinv - b(nzmin)=b(nzmin)-min(0._WP, wd)*zinv - c(nzmin)=c(nzmin)-max(0._WP, wd)*zinv - - ! =========================== - ! The rhs: - ! 
=========================== - !!PS ur(1:nzmax-1)=UV_rhs(1,1:nzmax-1,elem) - !!PS vr(1:nzmax-1)=UV_rhs(2,1:nzmax-1,elem) - ur(nzmin:nzmax-1)=UV_rhs(1,nzmin:nzmax-1,elem) - vr(nzmin:nzmax-1)=UV_rhs(2,nzmin:nzmax-1,elem) - - ! The first row contains surface forcing - !!PS ur(1)= ur(1)+zinv*stress_surf(1,elem)/density_0 - !!PS vr(1)= vr(1)+zinv*stress_surf(2,elem)/density_0 - ur(nzmin)= ur(nzmin)+zinv*stress_surf(1,elem)/density_0 - vr(nzmin)= vr(nzmin)+zinv*stress_surf(2,elem)/density_0 - - ! The last row contains bottom friction - zinv=1.0_WP*dt/(zbar_n(nzmax-1)-zbar_n(nzmax)) - !!PS friction=-C_d*sqrt(UV(1,nlevels(elem)-1,elem)**2+ & - !!PS UV(2,nlevels(elem)-1,elem)**2) - friction=-C_d*sqrt(UV(1,nzmax-1,elem)**2+ & - UV(2,nzmax-1,elem)**2) - ur(nzmax-1)=ur(nzmax-1)+zinv*friction*UV(1,nzmax-1,elem) - vr(nzmax-1)=vr(nzmax-1)+zinv*friction*UV(2,nzmax-1,elem) - - ! Model solves for the difference to the timestep N and therefore we need to - ! update the RHS for advective and diffusive contributions at the previous time step - !!PS do nz=2, nzmax-2 - do nz=nzmin+1, nzmax-2 - ur(nz)=ur(nz)-a(nz)*UV(1,nz-1,elem)-(b(nz)-1.0_WP)*UV(1,nz,elem)-c(nz)*UV(1,nz+1,elem) - vr(nz)=vr(nz)-a(nz)*UV(2,nz-1,elem)-(b(nz)-1.0_WP)*UV(2,nz,elem)-c(nz)*UV(2,nz+1,elem) - end do - !!PS ur(1)=ur(1)-(b(1)-1.0_WP)*UV(1,1,elem)-c(1)*UV(1,2,elem) - !!PS vr(1)=vr(1)-(b(1)-1.0_WP)*UV(2,1,elem)-c(1)*UV(2,2,elem) - ur(nzmin)=ur(nzmin)-(b(nzmin)-1.0_WP)*UV(1,nzmin,elem)-c(nzmin)*UV(1,nzmin+1,elem) - vr(nzmin)=vr(nzmin)-(b(nzmin)-1.0_WP)*UV(2,nzmin,elem)-c(nzmin)*UV(2,nzmin+1,elem) - - ur(nzmax-1)=ur(nzmax-1)-a(nzmax-1)*UV(1,nzmax-2,elem)-(b(nzmax-1)-1.0_WP)*UV(1,nzmax-1,elem) - vr(nzmax-1)=vr(nzmax-1)-a(nzmax-1)*UV(2,nzmax-2,elem)-(b(nzmax-1)-1.0_WP)*UV(2,nzmax-1,elem) - - ! =========================== - ! The sweep algorithm - ! =========================== - ! 
initialize c-prime and s,t-prime - !!PS cp(1) = c(1)/b(1) - !!PS up(1) = ur(1)/b(1) - !!PS vp(1) = vr(1)/b(1) - cp(nzmin) = c(nzmin)/b(nzmin) - up(nzmin) = ur(nzmin)/b(nzmin) - vp(nzmin) = vr(nzmin)/b(nzmin) - - ! solve for vectors c-prime and t, s-prime - !!PS do nz = 2,nzmax-1 - do nz = nzmin+1,nzmax-1 - m = b(nz)-cp(nz-1)*a(nz) - cp(nz) = c(nz)/m - up(nz) = (ur(nz)-up(nz-1)*a(nz))/m - vp(nz) = (vr(nz)-vp(nz-1)*a(nz))/m - enddo - ! initialize x - ur(nzmax-1) = up(nzmax-1) - vr(nzmax-1) = vp(nzmax-1) - - ! solve for x from the vectors c-prime and d-prime - !!PS do nz = nzmax-2, 1, -1 - do nz = nzmax-2, nzmin, -1 - ur(nz) = up(nz)-cp(nz)*ur(nz+1) - vr(nz) = vp(nz)-cp(nz)*vr(nz+1) - end do - - ! =========================== - ! RHS update - ! =========================== - !!PS do nz=1,nzmax-1 - do nz=nzmin,nzmax-1 - UV_rhs(1,nz,elem)=ur(nz) - UV_rhs(2,nz,elem)=vr(nz) - end do -end do !!! cycle over elements - + ! The first row + !!PS zinv=1.0_WP*dt/(zbar_n(1)-zbar_n(2)) + !!PS c(1)=-Av(2,elem)/(Z_n(1)-Z_n(2))*zinv + !!PS a(1)=0.0_WP + !!PS b(1)=-c(1)+1.0_WP + zinv=1.0_WP*dt/(zbar_n(nzmin)-zbar_n(nzmin+1)) + c(nzmin)=-Av(nzmin+1,elem)/(Z_n(nzmin)-Z_n(nzmin+1))*zinv + a(nzmin)=0.0_WP + b(nzmin)=-c(nzmin)+1.0_WP + + ! Update from the vertical advection + !!PS wu=sum(Wvel_i(1, elnodes))/3._WP + !!PS wd=sum(Wvel_i(2, elnodes))/3._WP + wu=sum(Wvel_i(nzmin, elnodes))/3._WP + wd=sum(Wvel_i(nzmin+1, elnodes))/3._WP + + !!PS b(1)=b(1)+wu*zinv + !!PS b(1)=b(1)-min(0._WP, wd)*zinv + !!PS c(1)=c(1)-max(0._WP, wd)*zinv + b(nzmin)=b(nzmin)+wu*zinv + b(nzmin)=b(nzmin)-min(0._WP, wd)*zinv + c(nzmin)=c(nzmin)-max(0._WP, wd)*zinv + + ! =========================== + ! The rhs: + ! =========================== + !!PS ur(1:nzmax-1)=UV_rhs(1,1:nzmax-1,elem) + !!PS vr(1:nzmax-1)=UV_rhs(2,1:nzmax-1,elem) + ur(nzmin:nzmax-1)=UV_rhs(1,nzmin:nzmax-1,elem) + vr(nzmin:nzmax-1)=UV_rhs(2,nzmin:nzmax-1,elem) + + ! 
The first row contains surface forcing + !!PS ur(1)= ur(1)+zinv*stress_surf(1,elem)/density_0 + !!PS vr(1)= vr(1)+zinv*stress_surf(2,elem)/density_0 + ur(nzmin)= ur(nzmin)+zinv*stress_surf(1,elem)/density_0 + vr(nzmin)= vr(nzmin)+zinv*stress_surf(2,elem)/density_0 + + ! The last row contains bottom friction + zinv=1.0_WP*dt/(zbar_n(nzmax-1)-zbar_n(nzmax)) + !!PS friction=-C_d*sqrt(UV(1,nlevels(elem)-1,elem)**2+ & + !!PS UV(2,nlevels(elem)-1,elem)**2) + friction=-C_d*sqrt(UV(1,nzmax-1,elem)**2+ & + UV(2,nzmax-1,elem)**2) + ur(nzmax-1)=ur(nzmax-1)+zinv*friction*UV(1,nzmax-1,elem) + vr(nzmax-1)=vr(nzmax-1)+zinv*friction*UV(2,nzmax-1,elem) + + ! Model solves for the difference to the timestep N and therefore we need to + ! update the RHS for advective and diffusive contributions at the previous time step + !!PS do nz=2, nzmax-2 + do nz=nzmin+1, nzmax-2 + ur(nz)=ur(nz)-a(nz)*UV(1,nz-1,elem)-(b(nz)-1.0_WP)*UV(1,nz,elem)-c(nz)*UV(1,nz+1,elem) + vr(nz)=vr(nz)-a(nz)*UV(2,nz-1,elem)-(b(nz)-1.0_WP)*UV(2,nz,elem)-c(nz)*UV(2,nz+1,elem) + end do + !!PS ur(1)=ur(1)-(b(1)-1.0_WP)*UV(1,1,elem)-c(1)*UV(1,2,elem) + !!PS vr(1)=vr(1)-(b(1)-1.0_WP)*UV(2,1,elem)-c(1)*UV(2,2,elem) + ur(nzmin)=ur(nzmin)-(b(nzmin)-1.0_WP)*UV(1,nzmin,elem)-c(nzmin)*UV(1,nzmin+1,elem) + vr(nzmin)=vr(nzmin)-(b(nzmin)-1.0_WP)*UV(2,nzmin,elem)-c(nzmin)*UV(2,nzmin+1,elem) + + ur(nzmax-1)=ur(nzmax-1)-a(nzmax-1)*UV(1,nzmax-2,elem)-(b(nzmax-1)-1.0_WP)*UV(1,nzmax-1,elem) + vr(nzmax-1)=vr(nzmax-1)-a(nzmax-1)*UV(2,nzmax-2,elem)-(b(nzmax-1)-1.0_WP)*UV(2,nzmax-1,elem) + + ! =========================== + ! The sweep algorithm + ! =========================== + ! initialize c-prime and s,t-prime + !!PS cp(1) = c(1)/b(1) + !!PS up(1) = ur(1)/b(1) + !!PS vp(1) = vr(1)/b(1) + cp(nzmin) = c(nzmin)/b(nzmin) + up(nzmin) = ur(nzmin)/b(nzmin) + vp(nzmin) = vr(nzmin)/b(nzmin) + + ! 
solve for vectors c-prime and t, s-prime + !!PS do nz = 2,nzmax-1 + do nz = nzmin+1,nzmax-1 + m = b(nz)-cp(nz-1)*a(nz) + cp(nz) = c(nz)/m + up(nz) = (ur(nz)-up(nz-1)*a(nz))/m + vp(nz) = (vr(nz)-vp(nz-1)*a(nz))/m + enddo + ! initialize x + ur(nzmax-1) = up(nzmax-1) + vr(nzmax-1) = vp(nzmax-1) + + ! solve for x from the vectors c-prime and d-prime + !!PS do nz = nzmax-2, 1, -1 + do nz = nzmax-2, nzmin, -1 + ur(nz) = up(nz)-cp(nz)*ur(nz+1) + vr(nz) = vp(nz)-cp(nz)*vr(nz+1) + end do + + ! =========================== + ! RHS update + ! =========================== + !!PS do nz=1,nzmax-1 + do nz=nzmin,nzmax-1 + UV_rhs(1,nz,elem)=ur(nz) + UV_rhs(2,nz,elem)=vr(nz) + end do + end do !!! cycle over elements +!$OMP END DO +!$OMP END PARALLEL end subroutine impl_vert_visc_ale ! ! !=============================================================================== -subroutine oce_timestep_ale(n, mesh) +subroutine oce_timestep_ale(n, ice, dynamics, tracers, partit, mesh) use g_config use MOD_MESH + use MOD_TRACER + use MOD_DYN + USE MOD_ICE use o_ARRAYS use o_PARAM - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP use g_comm_auto use io_RESTART !PS - use i_ARRAYS !PS use o_mixing_KPP_mod use g_cvmix_tke use g_cvmix_idemix @@ -2549,48 +2874,71 @@ subroutine oce_timestep_ale(n, mesh) use g_cvmix_tidal use Toy_Channel_Soufflet use oce_ale_interfaces - + use pressure_bv_interface + use pressure_force_4_linfs_interface + use pressure_force_4_zxxxx_interface + use compute_vel_rhs_interface + use solve_tracers_ale_interface + use write_step_info_interface + use check_blowup_interface + use fer_solve_interface IMPLICIT NONE + integer , intent(in) :: n + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + type(t_ice) , intent(inout), target :: ice + !___________________________________________________________________________ real(kind=8) :: t0,t1, 
t2, t30, t3, t4, t5, t6, t7, t8, t9, t10, loc, glo - integer :: n, node - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - + integer :: node +!NR + integer, save :: n_check=0 + real(kind=8) :: temp_check, sali_check + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: eta_n +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + eta_n => dynamics%eta_n(:) + + !___________________________________________________________________________ t0=MPI_Wtime() +! water_flux = 0.0_WP +! heat_flux = 0.0_WP +! stress_surf= 0.0_WP +! stress_node_surf= 0.0_WP -!!PS water_flux = 0.0_WP -!!PS heat_flux = 0.0_WP -!!PS stress_surf= 0.0_WP !___________________________________________________________________________ ! calculate equation of state, density, pressure and mixed layer depths if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call pressure_bv'//achar(27)//'[0m' - call pressure_bv(mesh) !!!!! HeRE change is made. It is linear EoS now. - + call pressure_bv(tracers, partit, mesh) !!!!! HeRE change is made. It is linear EoS now. + !___________________________________________________________________________ ! calculate calculate pressure gradient force if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call pressure_force_4_...'//achar(27)//'[0m' if (trim(which_ale)=='linfs') then - call pressure_force_4_linfs(mesh) + call pressure_force_4_linfs(tracers, partit, mesh) else - call pressure_force_4_zxxxx(mesh) + call pressure_force_4_zxxxx(tracers, partit, mesh) end if - + !___________________________________________________________________________ ! calculate alpha and beta ! it will be used for KPP, Redi, GM etc. Shall we keep it on in general case? 
- call sw_alpha_beta(tr_arr(:,:,1),tr_arr(:,:,2), mesh) - + call sw_alpha_beta(tracers%data(1)%values, tracers%data(2)%values, partit, mesh) + ! computes the xy gradient of a neutral surface; will be used by Redi, GM etc. - call compute_sigma_xy(tr_arr(:,:,1),tr_arr(:,:,2), mesh) - + call compute_sigma_xy(tracers%data(1)%values,tracers%data(2)%values, partit, mesh) + ! compute both: neutral slope and tapered neutral slope. Can be later combined with compute_sigma_xy ! will be primarily used for computing Redi diffusivities. etc? - call compute_neutral_slope(mesh) - + call compute_neutral_slope(partit, mesh) + !___________________________________________________________________________ - call status_check - + call status_check(partit) !___________________________________________________________________________ ! >>>>>> <<<<<< ! >>>>>> calculate vertical mixing coefficients for tracer (Kv) <<<<<< @@ -2611,37 +2959,42 @@ subroutine oce_timestep_ale(n, mesh) ! for debugging if (mod(mix_scheme_nmb,10)==6) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_idemix'//achar(27)//'[0m' - call calc_cvmix_idemix(mesh) + call calc_cvmix_idemix(partit, mesh) end if - + !___MAIN MIXING SCHEMES_____________________________________________________ ! use FESOM2.0 tuned k-profile parameterization for vertical mixing if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_KPP'//achar(27)//'[0m' - call oce_mixing_KPP(Av, Kv_double, mesh) - Kv=Kv_double(:,:,1) - call mo_convect(mesh) + call oce_mixing_KPP(Av, Kv_double, dynamics, tracers, partit, mesh) +!$OMP PARALLEL DO + DO node=1, myDim_nod2D+eDim_nod2D + Kv(:, node)=Kv_double(:, node, 1) + END DO +!$OMP END PARALLEL DO + + call mo_convect(ice, partit, mesh) ! use FESOM2.0 tuned pacanowski & philander parameterization for vertical ! mixing else if(mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then if (flag_debug .and. 
mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_PP'//achar(27)//'[0m' - call oce_mixing_PP(mesh) - call mo_convect(mesh) + call oce_mixing_PP(dynamics, partit, mesh) + call mo_convect(ice, partit, mesh) ! use CVMIX KPP (Large at al. 1994) else if(mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_kpp'//achar(27)//'[0m' - call calc_cvmix_kpp(mesh) - call mo_convect(mesh) + call calc_cvmix_kpp(ice, dynamics, tracers, partit, mesh) + call mo_convect(ice, partit, mesh) ! use CVMIX PP (Pacanowski and Philander 1981) parameterisation for mixing ! based on Richardson number Ri = N^2/(du/dz)^2, using Brunt Väisälä frequency ! N^2 and vertical horizontal velocity shear dui/dz else if(mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_pp'//achar(27)//'[0m' - call calc_cvmix_pp(mesh) - call mo_convect(mesh) + call calc_cvmix_pp(dynamics, partit, mesh) + call mo_convect(ice, partit, mesh) ! use CVMIX TKE (turbulent kinetic energy closure) parameterisation for ! vertical mixing with or without the IDEMIX (dissipation of energy by @@ -2649,8 +3002,8 @@ subroutine oce_timestep_ale(n, mesh) ! Model for the diapycnal diffusivity induced by internal gravity waves" else if(mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_tke'//achar(27)//'[0m' - call calc_cvmix_tke(mesh) - call mo_convect(mesh) + call calc_cvmix_tke(dynamics, partit, mesh) + call mo_convect(ice, partit, mesh) end if @@ -2662,108 +3015,119 @@ subroutine oce_timestep_ale(n, mesh) ! mixing schemes if ( mod(mix_scheme_nmb,10)==7) then if (flag_debug .and. 
mype==0) print *, achar(27)//'[36m'//' --> call calc_cvmix_tidal'//achar(27)//'[0m' - call calc_cvmix_tidal(mesh) + call calc_cvmix_tidal(partit, mesh) end if t1=MPI_Wtime() !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_vel_rhs'//achar(27)//'[0m' - if(mom_adv/=3) then - call compute_vel_rhs(mesh) - else - call compute_vel_rhs_vinv(mesh) - end if + call compute_vel_rhs(ice, dynamics, partit, mesh) !___________________________________________________________________________ - if (any(UV_rhs/=UV_rhs)) write(*,*) ' --> found NaN UV_rhs MARK 2' - call viscosity_filter(visc_option, mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call viscosity_filter'//achar(27)//'[0m' + call viscosity_filter(dynamics%opt_visc, dynamics, partit, mesh) !___________________________________________________________________________ if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call impl_vert_visc_ale'//achar(27)//'[0m' - if(i_vert_visc) call impl_vert_visc_ale(mesh) + if(dynamics%use_ivertvisc) call impl_vert_visc_ale(dynamics,partit, mesh) t2=MPI_Wtime() - + !___________________________________________________________________________ ! >->->->->->->->->->->->-> ALE-part starts <-<-<-<-<-<-<-<-<-<-<-<- !___________________________________________________________________________ ! Update stiffness matrix by dhe=hbar(n+1/2)-hbar(n-1/2) on elements, only ! needed for zlevel and zstar - if (.not. trim(which_ale)=='linfs') call update_stiff_mat_ale(mesh) + if (.not. trim(which_ale)=='linfs') call update_stiff_mat_ale(partit, mesh) if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_ssh_rhs_ale'//achar(27)//'[0m' ! ssh_rhs=-alpha*\nabla\int(U_n+U_rhs)dz-(1-alpha)*... ! see "FESOM2: from finite elements to finte volumes, S. Danilov..." eq. 
(18) rhs - call compute_ssh_rhs_ale(mesh) - + call compute_ssh_rhs_ale(dynamics, partit, mesh) + ! Take updated ssh matrix and solve --> new ssh! t30=MPI_Wtime() - call solve_ssh_ale(mesh) - if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_vel(mesh) + call solve_ssh_ale(dynamics, partit, mesh) + + if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) then + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call relax_zonal_vel'//achar(27)//'[0m' + call relax_zonal_vel(dynamics, partit, mesh) + end if t3=MPI_Wtime() ! estimate new horizontal velocity u^(n+1) ! u^(n+1) = u* + [-g * tau * theta * grad(eta^(n+1)-eta^(n)) ] if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call update_vel'//achar(27)//'[0m' - call update_vel(mesh) + call update_vel(dynamics, partit, mesh) ! --> eta_(n) --> eta_(n+1) = eta_(n) + deta = eta_(n) + (eta_(n+1) + eta_(n)) t4=MPI_Wtime() ! Update to hbar(n+3/2) and compute dhe to be used on the next step if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call compute_hbar_ale'//achar(27)//'[0m' - call compute_hbar_ale(mesh) - - !___________________________________________________________________________ - ! Current dynamic elevation alpha*hbar(n+1/2)+(1-alpha)*hbar(n-1/2) - ! equation (14) Danlov et.al "the finite volume sea ice ocean model FESOM2 - ! ...if we do it here we don't need to write hbar_old into a restart file... - eta_n=alpha*hbar+(1.0_WP-alpha)*hbar_old + call compute_hbar_ale(dynamics, partit, mesh) + !___________________________________________________________________________ + ! - Current dynamic elevation alpha*hbar(n+1/2)+(1-alpha)*hbar(n-1/2) + ! equation (14) Danlov et.al "the finite volume sea ice ocean model FESOM2 + ! ...if we do it here we don't need to write hbar_old into a restart file... + ! - where(ulevels_nod2D==1) is used here because of the rigid lid + ! approximation under the cavity + ! 
- at points in the cavity the time derivative term in ssh matrix will be + ! omitted; and (14) will not be applied at cavity points. Additionally, + ! since there is no real elevation, but only surface pressure, there is + ! no layer motion under the cavity. In this case the ice sheet acts as a + ! rigid lid. +!$OMP PARALLEL DO + do node=1, myDim_nod2D+eDim_nod2D + if (ulevels_nod2D(node)==1) eta_n(node)=alpha*hbar(node)+(1.0_WP-alpha)*hbar_old(node) + end do +!$OMP END PARALLEL DO ! --> eta_(n) ! call zero_dynamics !DS, zeros several dynamical variables; to be used for testing new implementations! t5=MPI_Wtime() - + !___________________________________________________________________________ + ! Do horizontal and vertical scaling of GM/Redi diffusivity if (Fer_GM .or. Redi) then - call init_Redi_GM(mesh) + call init_Redi_GM(partit, mesh) end if - !___________________________________________________________________________ ! Implementation of Gent & McWiliams parameterization after R. Ferrari et al., 2010 ! does not belong directly to ALE formalism if (Fer_GM) then if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call fer_solve_Gamma'//achar(27)//'[0m' - call fer_solve_Gamma(mesh) - call fer_gamma2vel(mesh) + call fer_solve_Gamma(partit, mesh) + call fer_gamma2vel(dynamics, partit, mesh) end if - t6=MPI_Wtime() - + t6=MPI_Wtime() !___________________________________________________________________________ ! The main step of ALE procedure --> this is were the magic happens --> here ! is decided how change in hbar is distributed over the vertical layers if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call vert_vel_ale'//achar(27)//'[0m' - call vert_vel_ale(mesh) + call vert_vel_ale(dynamics, partit, mesh) t7=MPI_Wtime() - + !___________________________________________________________________________ ! solve tracer equation if (flag_debug .and. 
mype==0) print *, achar(27)//'[36m'//' --> call solve_tracers_ale'//achar(27)//'[0m' - call solve_tracers_ale(mesh) + call solve_tracers_ale(ice, dynamics, tracers, partit, mesh) t8=MPI_Wtime() !___________________________________________________________________________ ! Update hnode=hnode_new, helem if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call update_thickness_ale'//achar(27)//'[0m' - call update_thickness_ale(mesh) + call update_thickness_ale(partit, mesh) t9=MPI_Wtime() - !___________________________________________________________________________ ! write out global fields for debugging - call write_step_info(n,logfile_outfreq, mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call write_step_info'//achar(27)//'[0m' + call write_step_info(n,logfile_outfreq, ice, dynamics, tracers, partit, mesh) ! check model for blowup --> ! write_step_info and check_blowup require ! togeather around 2.5% of model runtime - call check_blowup(n, mesh) + if (flag_debug .and. mype==0) print *, achar(27)//'[36m'//' --> call check_blowup'//achar(27)//'[0m' + call check_blowup(n, ice, dynamics, tracers, partit, mesh) t10=MPI_Wtime() + !___________________________________________________________________________ ! write out execution times for ocean step parts rtime_oce = rtime_oce + (t10-t0)-(t10-t9) @@ -2794,4 +3158,20 @@ subroutine oce_timestep_ale(n, mesh) write(*,*) end if + +!NR Checksum for tracers, as they are most sensitive + + n_check = n_check+1 + temp_check = 0. + sali_check = 0. 
+ do node=1,myDim_nod2D+eDim_nod2D + temp_check = temp_check + sum(tracers%data(1)%values(nlevels_nod2D(node)-1:ulevels_nod2D(node),node)) + sali_check = sali_check + sum(tracers%data(2)%values(nlevels_nod2D(node)-1:ulevels_nod2D(node),node)) + end do + call MPI_Allreduce(MPI_IN_PLACE, temp_check, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr) + call MPI_Allreduce(MPI_IN_PLACE, sali_check, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr) + + print *,'Check',n_check,temp_check,sali_check + end subroutine oce_timestep_ale + diff --git a/src/oce_ale_mixing_kpp.F90 b/src/oce_ale_mixing_kpp.F90 index d08060e98..22f93f4ca 100755 --- a/src/oce_ale_mixing_kpp.F90 +++ b/src/oce_ale_mixing_kpp.F90 @@ -8,10 +8,12 @@ MODULE o_mixing_KPP_mod !--------------------------------------------------------------- USE o_PARAM USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE MOD_DYN USE o_ARRAYS - USE g_PARSUP USE g_config - USE i_arrays USE g_forcing_arrays USE g_comm_auto USE g_support @@ -47,7 +49,7 @@ MODULE o_mixing_KPP_mod real(KIND=WP), parameter :: epsln = 1.0e-40_WP ! a small value - real(KIND=WP), parameter :: epsilon_kpp = 0.1_WP + real(KIND=WP), parameter :: epsilon_kpp = 0.1_WP real(KIND=WP), parameter :: vonk = 0.4_WP real(KIND=WP), parameter :: conc1 = 5.0_WP @@ -68,9 +70,13 @@ MODULE o_mixing_KPP_mod integer, parameter :: nnj = 480 ! number of values for ustar in the look up table real(KIND=WP), dimension(0:nni+1,0:nnj+1) :: wmt ! lookup table for wm, the turbulent velocity scale for momentum real(KIND=WP), dimension(0:nni+1,0:nnj+1) :: wst ! lookup table for ws, the turbulent velocity scale scalars - logical :: smooth_blmc=.true. - logical :: smooth_hbl =.false. - logical :: smooth_Ri =.false. + logical :: smooth_blmc =.true. + logical :: smooth_hbl =.false. + logical :: smooth_Ri_hor =.false. + logical :: smooth_Ri_ver =.false. + logical :: limit_hbl_ekmmob =.false. !.true. + + integer :: n ! 
to perform loop iterations contains @@ -92,7 +98,7 @@ MODULE o_mixing_KPP_mod ! PP: Kv(nl,node_size) and Av(nl,elem_size) ! ******************************************************************* - subroutine oce_mixing_kpp_init(mesh) + subroutine oce_mixing_kpp_init(partit, mesh) IMPLICIT NONE @@ -112,49 +118,54 @@ subroutine oce_mixing_kpp_init(mesh) integer :: i, j - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - allocate ( ghats ( nl-1, myDim_nod2D+eDim_nod2D )) ! nonlocal transport (s/m^2) - allocate ( hbl ( myDim_nod2D+eDim_nod2D )) ! boundary layer depth - ghats = 0.0_WP - hbl = 0.0_WP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - allocate ( bfsfc ( myDim_nod2D+eDim_nod2D )) ! surface buoyancy forcing (m^2/s^3) + allocate ( ghats ( nl-1, myDim_nod2D+eDim_nod2D )) ! nonlocal transport (s/m^2) + allocate ( hbl ( myDim_nod2D+eDim_nod2D )) ! boundary layer depth + allocate ( bfsfc ( myDim_nod2D+eDim_nod2D )) ! surface buoyancy forcing (m^2/s^3) allocate ( caseA ( myDim_nod2D+eDim_nod2D )) ! = 1 in case A; =0 in case B allocate ( stable ( myDim_nod2D+eDim_nod2D )) ! = 1 in stable forcing; =0 in unstable - allocate ( dkm1 ( myDim_nod2D+eDim_nod2D, 3 )) ! boundary layer diff at kbl-1 level - allocate ( blmc ( nl, myDim_nod2D+eDim_nod2D, 3 )) ! boundary layer mixing coefficients + allocate ( dkm1 ( myDim_nod2D+eDim_nod2D, 3 )) ! boundary layer diff at kbl-1 level + allocate ( blmc ( nl, myDim_nod2D+eDim_nod2D, 3 )) ! boundary layer mixing coefficients allocate ( ustar ( myDim_nod2D+eDim_nod2D )) ! surface friction velocity (m/s) allocate ( Bo ( myDim_nod2D+eDim_nod2D )) ! surface turb buoy. forcing (m^2/s^3) allocate ( dVsq ( nl, myDim_nod2D+eDim_nod2D )) ! (velocity shear re sfc)^2 (m/s)^2 allocate ( dbsfc ( nl, myDim_nod2D+eDim_nod2D )) ! 
buoyancy re sfc - allocate ( kbl ( myDim_nod2D+eDim_nod2D )) ! index of first grid level below hbl - - bfsfc = 0.0_WP - caseA = 0.0_WP - stable= 0.0_WP - dkm1 = 0.0_WP - blmc = 0.0_WP - ustar = 0.0_WP - Bo = 0.0_WP - dVsq = 0.0_WP - dbsfc = 0.0_WP - kbl = 0.0_WP + allocate ( kbl ( myDim_nod2D+eDim_nod2D )) ! index of first grid level below hbl + +!$OMP PARALLEL DO + DO n=1, myDim_nod2D+eDim_nod2D + ghats (:, n ) = 0.0_WP + hbl ( n ) = 0.0_WP + bfsfc ( n ) = 0.0_WP + caseA ( n ) = 0.0_WP + stable ( n ) = 0.0_WP + dkm1 ( n, :) = 0.0_WP + blmc (:, n, :) = 0.0_WP + ustar ( n ) = 0.0_WP + Bo ( n ) = 0.0_WP + dVsq (:, n ) = 0.0_WP + dbsfc (:, n ) = 0.0_WP + kbl ( n ) = 0.0_WP + END DO +!$OMP END PARALLEL DO ! ******************************************************************* ! Initialize some constants for kmix subroutines, and initialize ! for kmix subroutine "wscale" the 2D-lookup table for wm and ws ! as functions of ustar and zetahat (=vonk*sigma*hbl*bfsfc). ! ******************************************************************* - ! ******************************************************************* ! Define some non-dimensional constants (recall epsilon_kpp=0.1) ! ******************************************************************* -! Vtc used in eqn. 23 - Vtc = concv * sqrt(0.2_WP/concs/epsilon_kpp) / vonk**2 / Ricr - +! Vtc used in eqn. 23 + Vtc = concv * sqrt(0.2_WP/concs/epsilon_kpp) / vonk**2 / Ricr + ! ******************************************************************* ! The nonlocal transport term is nonzero ONLY FOR SCALARS in ! unstable (convective) forcing conditions where it has been @@ -164,7 +175,7 @@ subroutine oce_mixing_kpp_init(mesh) ! cg = cs in eqn. 20 ! ******************************************************************* - cg = cstar * vonk * (concs * vonk * epsilon_kpp)**(1._WP/3._WP) + cg = cstar * vonk * (concs * vonk * epsilon_kpp)**(1._WP/3._WP) ! ******************************************************************* ! 
Construct the wm and ws lookup tables (eqn. 13 & B1) @@ -235,7 +246,7 @@ end subroutine oce_mixing_kpp_init ! diffK = diffusion coefficient (m^2/s) ! !--------------------------------------------------------------- - subroutine oce_mixing_KPP(viscAE, diffK, mesh) + SUBROUTINE oce_mixing_KPP(viscAE, diffK, dynamics, tracers, partit, mesh) IMPLICIT NONE @@ -243,27 +254,38 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) ! Define allocatble arrays under oce_modules.F90 ! Allocate arrays under oce_setup_step.F90 ! ******************************************************************* - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + type(t_dyn) , intent(in), target :: dynamics integer :: node, kn, elem, elnodes(3) integer :: nz, ns, j, q, lay, lay_mi, nzmin, nzmax real(KIND=WP) :: smftu, smftv, aux, vol real(KIND=WP) :: dens_up, minmix real(KIND=WP) :: u_loc, v_loc -!!PS real(kind=WP) :: tsurf, ssurf, t, s real(kind=WP) :: usurf, vsurf real(kind=WP) :: rhopot, bulk, pz real(kind=WP) :: bulk_0, bulk_pz, bulk_pz2 real(kind=WP) :: rho_surf, rho_insitu - real(KIND=WP), dimension(mesh%nl, myDim_elem2D+eDim_elem2D), intent(inout) :: viscAE!for momentum (elements) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D) :: viscA !for momentum (nodes) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D, num_tracers), intent(inout) :: diffK !for T and S - -#include "associate_mesh.h" - - ViscA=0.0_WP - DO node=1, myDim_nod2D !+eDim_nod2D - nzmin = ulevels_nod2D(node) - nzmax = nlevels_nod2D(node) + real(KIND=WP), dimension(mesh%nl, partit%myDim_elem2D+partit%eDim_elem2D), intent(inout) :: viscAE!for momentum (elements) + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D +partit%eDim_nod2D) :: viscA !for momentum (nodes) + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D +partit%eDim_nod2D, tracers%num_tracers), intent(inout) :: 
diffK !for T and S + real(kind=WP), dimension(:,:,:), pointer :: UVnode +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) + +!$OMP PARALLEL DO + DO node=1, myDim_nod2D+eDim_nod2D + ViscA(:, node) = 0.0_WP + END DO +!$OMP END PARALLEL DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, usurf, vsurf, u_loc, v_loc) + DO node=1, myDim_nod2D !+eDim_nod2D + nzmin = ulevels_nod2D(node) + nzmax = nlevels_nod2D(node) ! ******************************************************************* ! Eqn. 21 @@ -281,37 +303,24 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) ! is calculated in a separate routine ! ******************************************************************* -! Surface layer is our reference dVsq(m2/s2) & dbsfc(m/s2) - !!PS dVsq (1,node) = 0.0_WP - !!PS dbsfc(1,node) = 0.0_WP - dVsq (nzmin,node) = 0.0_WP - dbsfc(nzmin,node) = 0.0_WP - -! Surface temperature and salinity - !!PS tsurf = tr_arr(1,node,1) - !!PS ssurf = tr_arr(1,node,2) -!!PS tsurf = tr_arr(nzmin,node,1) -!!PS ssurf = tr_arr(nzmin,node,2) -! Surface velocity - !!PS usurf = Unode(1,1,node) - !!PS vsurf = Unode(2,1,node) - usurf = Unode(1,nzmin,node) - vsurf = Unode(2,nzmin,node) - - !!PS DO nz=2, nlevels_nod2d(node)-1 - DO nz=nzmin+1, nzmax-1 - -! Squared velocity shear referenced to surface (@ Z) - u_loc = 0.5_WP * ( Unode(1,nz-1,node) + Unode(1,nz,node) ) - v_loc = 0.5_WP * ( Unode(2,nz-1,node) + Unode(2,nz,node) ) - - dVsq(nz,node) = ( usurf - u_loc )**2 + ( vsurf - v_loc )**2 - -! dbsfc (buoyancy difference with respect to the surface (m/s2)) is now computed in oce_ale_pressure_bv.F90 + ! Surface layer is our reference dVsq(m2/s2) & dbsfc(m/s2) + dVsq (nzmin,node) = 0.0_WP + dbsfc(nzmin,node) = 0.0_WP + + ! Surface velocity + usurf = UVnode(1,nzmin,node) + vsurf = UVnode(2,nzmin,node) + DO nz=nzmin+1, nzmax-1 + + ! 
Squared velocity shear referenced to surface (@ Z) + u_loc = 0.5_WP * ( UVnode(1,nz-1,node) + UVnode(1,nz,node) ) + v_loc = 0.5_WP * ( UVnode(2,nz-1,node) + UVnode(2,nz,node) ) + dVsq(nz,node) = ( usurf - u_loc )**2 + ( vsurf - v_loc )**2 + ! dbsfc (buoyancy difference with respect to the surface (m/s2)) is now computed in oce_ale_pressure_bv.F90 + END DO + dVsq ( nzmax, node ) = dVsq ( nzmax-1, node ) END DO - !!PS dVsq ( nlevels_nod2d(node), node ) = dVsq ( nlevels_nod2d(node)-1, node ) - dVsq ( nzmax, node ) = dVsq ( nzmax-1, node ) - END DO +!$OMP END PARALLEL DO ! ******************************************************************* ! compute thermal and haline expansion coefficients (without factor of rho). @@ -325,63 +334,53 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) ! Reason: oce_timestep(n) is called after subroutine oce_mixing_(K)PP ! where compute_sigma_xy -> sw_alpha_beta is called (Fer_GM should be set to true) ! ******************************************************************* -! IF ( .not. Fer_GM ) THEN -! CALL sw_alpha_beta(tr_arr(:,:,1),tr_arr(:,:,2)) -! ENDIF -! ******************************************************************* ! friction velocity, turbulent sfc buoyancy forcing ! ustar = sqrt( sqrt( stress_atmoce_x^2 + stress_atmoce_y^2 ) / rho ) (m/s) ! bo = -g * ( Talpha*heat_flux/vcpw + Sbeta * salinity*water_flux ) (m^2/s^3) ! ******************************************************************* - - DO node=1, myDim_nod2D !+eDim_nod2D - nzmin = ulevels_nod2D(node) - ustar(node) = sqrt( sqrt( stress_atmoce_x(node)**2 + stress_atmoce_y(node)**2 )*density_0_r ) ! @ the surface (eqn. 2) - -! Surface buoyancy forcing (eqns. 
A2c & A2d & A3b & A3d) - !!PS Bo(node) = -g * ( sw_alpha(1,node) * heat_flux(node) / vcpw & !heat_flux & water_flux: positive up - !!PS + sw_beta (1,node) * water_flux(node) * tr_arr(1,node,2)) - Bo(node) = -g * ( sw_alpha(nzmin,node) * heat_flux(node) / vcpw & !heat_flux & water_flux: positive up - + sw_beta (nzmin,node) * water_flux(node) * tr_arr(nzmin,node,2)) - END DO - +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nzmin) + DO node=1, myDim_nod2D + nzmin = ulevels_nod2D(node) + ustar(node) = sqrt( sqrt( stress_atmoce_x(node)**2 + stress_atmoce_y(node)**2 )*density_0_r ) ! @ the surface (eqn. 2) + ! Surface buoyancy forcing (eqns. A2c & A2d & A3b & A3d) + Bo(node) = -g * ( sw_alpha(nzmin,node) * heat_flux(node) / vcpw & !heat_flux & water_flux: positive up + + sw_beta (nzmin,node) * water_flux(node) * tracers%data(2)%values(nzmin,node)) + END DO +!$OMP END PARALLEL DO ! compute interior mixing coefficients everywhere, due to constant ! internal wave activity, static instability, and local shear ! instability. - CALL ri_iwmix(viscA, diffK, mesh) + CALL ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) ! add double diffusion - IF (double_diffusion) then - CALL ddmix(diffK, mesh) - END IF + IF (double_diffusion) then + CALL ddmix(diffK, tracers, partit, mesh) + END IF ! boundary layer mixing coefficients: diagnose new b.l. depth - CALL bldepth(mesh) - + CALL bldepth(partit, mesh) ! boundary layer diffusivities - CALL blmix_kpp(viscA, diffK, mesh) - + CALL blmix_kpp(viscA, diffK, partit, mesh) ! 
enhance diffusivity at interface kbl - 1 - CALL enhance(viscA, diffK, mesh) + CALL enhance(viscA, diffK, partit, mesh) - if (smooth_blmc) then - call exchange_nod(blmc(:,:,1)) - call exchange_nod(blmc(:,:,2)) - call exchange_nod(blmc(:,:,3)) - do j=1, 3 + if (smooth_blmc) then + call exchange_nod(blmc(:,:,1), partit) + call exchange_nod(blmc(:,:,2), partit) + call exchange_nod(blmc(:,:,3), partit) + do j=1, 3 !_____________________________________________________________________ ! all loops go over myDim_nod2D so no halo information --> for smoothing ! haloinfo is required --> therefor exchange_nod - call smooth_nod(blmc(:,:,j), 3, mesh) - end do - end if - + call smooth_nod(blmc(:,:,j), 3, partit, mesh) + end do + end if +!$OMP BARRIER ! then combine blmc and viscA/diffK - - DO node=1, myDim_nod2D - nzmin = ulevels_nod2D(node) - nzmax = nlevels_nod2D(node) - !!PS DO nz=2,nlevels_nod2d(node)-1 - DO nz=nzmin+1,nzmax-1 +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax) + DO node=1, myDim_nod2D + nzmin = ulevels_nod2D(node) + nzmax = nlevels_nod2D(node) + DO nz=nzmin+1,nzmax-1 IF (nz < kbl(node)) then ! within the bounday layer viscA(nz,node ) = MAX(viscA(nz,node ), blmc(nz,node,1)) diffK(nz,node,1) = MAX(diffK(nz,node,1), blmc(nz,node,2)) @@ -389,46 +388,36 @@ subroutine oce_mixing_KPP(viscAE, diffK, mesh) ELSE ghats(nz,node)=0.0_WP ! outside the boundary layer set nonlocal terms to zero ENDIF - END DO - END DO + END DO + END DO +!$OMP END PARALLEL DO + !_____________________________________________________________________________ + ! do all node loops only over myDim_nod2D --> therefore do an halo exchange + ! only at the end should save some time + call exchange_nod(diffK(:,:,1), partit) + call exchange_nod(diffK(:,:,2), partit) + call exchange_nod(ghats, partit) - !_____________________________________________________________________________ - ! do all node loops only over myDim_nod2D --> therefore do an halo exchange - ! 
only at the end should save some time - call exchange_nod(diffK(:,:,1)) - call exchange_nod(diffK(:,:,2)) - -! OVER ELEMENTS - call exchange_nod(viscA) !Warning: don't forget to communicate before averaging on elements!!! - minmix=3.0e-3_WP - DO elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - viscAE(nz,elem) = SUM(viscA(nz,elnodes))/3.0_WP ! (elementwise) + ! OVER ELEMENTS + call exchange_nod(viscA, partit) !Warning: don't forget to communicate before averaging on elements!!! +!$OMP BARRIER + minmix=3.0e-3_WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nz, nzmin, nzmax) + DO elem = 1, myDim_elem2D + elnodes = elem2D_nodes(:,elem) + nzmin = ulevels(elem) + nzmax = nlevels(elem) + DO nz=nzmin,nzmax-1 + viscAE(nz, elem) = SUM(viscA(nz, elnodes))/3.0_WP ! (elementwise) + END DO + viscAE( nlevels(elem), elem ) = viscAE( nlevels(elem)-1, elem) + ! Set the mixing coeff. in the first layer above some limiting value + ! this is very helpful to avoid huge surface velocity when vertical + ! viscosity is very small derived from the KPP scheme. + ! I strongly recommend this trick, at least in the current FESOM version. + IF (viscAE(nzmin,elem) < minmix) viscAE(nzmin,elem) = minmix END DO - viscAE( nlevels(elem), elem ) = viscAE( nlevels(elem)-1, elem ) - - ! Set the mixing coeff. in the first layer above some limiting value - ! this is very helpful to avoid huge surface velocity when vertical - ! viscosity is very small derived from the KPP scheme. - ! I strongly recommend this trick, at least in the current FESOM version. - if (viscAE(nzmin,elem) < minmix) viscAE(nzmin,elem) = minmix - - END DO - -!!PS ! Set the mixing coeff. in the first layer above some limiting value -!!PS ! this is very helpful to avoid huge surface velocity when vertical -!!PS ! viscosity is very small derived from the KPP scheme. -!!PS ! 
I strongly recommend this trick, at least in the current FESOM version. -!!PS minmix=3.0e-3_WP -!!PS WHERE(viscAE(nzmin,:) < minmix) -!!PS viscAE(nzmin,:) = minmix -!!PS END WHERE - -! non-local contribution will be added to oce_tracer_mod directly +!$OMP END PARALLEL DO END SUBROUTINE oce_mixing_kpp @@ -472,7 +461,7 @@ END SUBROUTINE oce_mixing_kpp ! real caseA(t2d) ! =1 in case A, =0 in case B ! integer kbl(t2d) ! index of first grid level below hbl ! - SUBROUTINE bldepth(mesh) + SUBROUTINE bldepth(partit, mesh) IMPLICIT NONE @@ -485,35 +474,41 @@ SUBROUTINE bldepth(mesh) real(KIND=WP), parameter :: cekman = 0.7_WP ! constant for Ekman depth real(KIND=WP), parameter :: cmonob = 1.0_WP ! constant for Monin-Obukhov depth - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" -! Initialize hbl and kbl to bottomed out values +!$OMP PARALLEL DO + ! Initialize hbl and kbl to bottomed out values DO node=1, myDim_nod2D !+eDim_nod2D -! Index of first grid level below hbl + ! Index of first grid level below hbl kbl(node) = nlevels_nod2D(node) -! Boundary layer depth + ! Boundary layer depth hbl(node) = ABS( zbar_3d_n( nlevels_nod2d(node),node ) ) END DO +!$OMP END PARALLEL DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, coeff_sw, Rib_km1, zk, zkm1, sigma, zehat, & +!$OMP wm, ws, bvsq, Vtsq, Ritop, Rib_k, dzup, hekman, hmonob, hlimit) +!$OMP DO DO node=1, myDim_nod2D !+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) IF (use_sw_pene) THEN - !!PS coeff_sw = g * sw_alpha(1,node) ! @ the surface @ Z (m/s2/K) coeff_sw = g * sw_alpha(nzmin,node) ! 
@ the surface @ Z (m/s2/K) END IF Rib_km1 = 0.0_WP - !!PS nk = nlevels_nod2D(node) bfsfc(node) = Bo(node) - !!PS DO nz=2,nk DO nz=nzmin+1,nzmax - zk = ABS( zbar_3d_n(nz,node) ) + zk = ABS( zbar_3d_n(nz, node) ) zkm1 = ABS( zbar_3d_n(nz-1,node) ) ! bfsfc = Bo + sw contribution @@ -587,7 +582,7 @@ SUBROUTINE bldepth(mesh) !!PS IF (bfsfc(node) > 0.0_WP) THEN IF (bfsfc(node) > 0.0_WP .and. nzmin==1) THEN !-> no ekman or monin-obukov when there is cavity - hekman = cekman * ustar(node) / MAX( ABS (coriolis_node(node) ), epsln) + hekman = cekman * ustar(node) / MAX( ABS (mesh%coriolis_node(node) ), epsln) hmonob = cmonob * ustar(node) * ustar(node) * ustar(node) & /vonk / (bfsfc(node) + epsln) hlimit = stable(node) * AMIN1( hekman, hmonob ) @@ -595,14 +590,17 @@ SUBROUTINE bldepth(mesh) hbl(node) = MAX( hbl(node), ABS(zbar_3d_n(2,node)) ) END IF END DO +!$OMP END DO +!$OMP END PARALLEL if (smooth_hbl) then - call exchange_nod(hbl) - call smooth_nod(hbl, 3, mesh) + call exchange_nod(hbl, partit) + call smooth_nod(hbl, 3, partit, mesh) end if +!$OMP BARRIER - DO node=1, myDim_nod2D !+eDim_nod2D - !!PS nk = nlevels_nod2D(node) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, dzup, coeff_sw) + DO node=1, myDim_nod2D nzmax = nlevels_nod2D(node) nzmin = ulevels_nod2D(node) !----------------------------------------------------------------------- @@ -617,11 +615,13 @@ SUBROUTINE bldepth(mesh) EXIT END IF END DO + !----------------------------------------------------------------------- ! find stability and buoyancy forcing for final hbl values !----------------------------------------------------------------------- IF (use_sw_pene) THEN - ! Linear interpolation of sw_3d to depth of hbl + coeff_sw = g * sw_alpha(nzmin,node) ! @ the surface @ Z (m/s2/K) + ! Linear interpolation of sw_3d to depth of hbl bfsfc(node) = Bo(node) + & coeff_sw * & ( sw_3d(nzmin,node) - & @@ -637,12 +637,10 @@ SUBROUTINE bldepth(mesh) ! 
(if hbl is below (deeper than) the mid point of level kbl ! then caseA=0 else caseA=1) !----------------------------------------------------------------------- - -! nz=kbl(node) dzup = zbar_3d_n(kbl(node)-1,node) - zbar_3d_n(kbl(node),node) caseA(node) = 0.5_WP + SIGN( 0.5_WP, ABS( zbar_3d_n(kbl(node),node) ) - 0.5_WP * dzup - hbl(node) ) - - END DO + END DO +!$OMP END PARALLEL DO END SUBROUTINE bldepth @@ -725,118 +723,115 @@ END SUBROUTINE wscale ! visc = viscosity coefficient (m**2/s) ! diff = diffusion coefficient (m**2/s) ! - subroutine ri_iwmix(viscA, diffK, mesh) + subroutine ri_iwmix(viscA, diffK, dynamics, tracers, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + type(t_dyn), intent(in), target :: dynamics integer :: node, nz, mr, nzmin, nzmax real(KIND=WP) , parameter :: Riinfty = 0.8_WP ! local Richardson Number limit for shear instability (LMD 1994 uses 0.7) real(KIND=WP) :: ri_prev, tmp real(KIND=WP) :: Rigg, ratio, frit real(KIND=WP) :: dz_inv, shear, aux, dep, lat, Kv0_b - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D ), intent(inout) :: viscA !for momentum (nodes) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D ,num_tracers), intent(inout) :: diffK !for T and S + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D ), intent(inout) :: viscA !for momentum (nodes) + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D ,tracers%num_tracers), intent(inout) :: diffK !for T and S ! Put them under the namelist.oce logical :: smooth_richardson_number = .false. integer :: num_smoothings = 1 ! for vertical smoothing of Richardson number - -#include "associate_mesh.h" - -! 
Compute Richardson number and store it as diffK to save memory + real(kind=WP), dimension(:,:,:), pointer :: UVnode +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UVnode=>dynamics%uvnode(:,:,:) + + ! Compute Richardson number and store it as diffK to save memory +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, dz_inv, shear, mr, ri_prev, tmp) DO node=1, myDim_nod2D! +eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) !!PS DO nz=2,nlevels_nod2d(node)-1 DO nz=nzmin+1,nzmax-1 dz_inv = 1.0_WP / (Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) ! > 0 - shear = ( Unode(1, nz-1, node) - Unode(1, nz, node) )**2 + & - ( Unode(2, nz-1, node) - Unode(2, nz, node) )**2 + shear = ( UVnode(1, nz-1, node) - UVnode(1, nz, node) )**2 + & + ( UVnode(2, nz-1, node) - UVnode(2, nz, node) )**2 shear = shear * dz_inv * dz_inv diffK(nz,node,1) = MAX( bvfreq(nz,node), 0.0_WP ) / (shear + epsln) ! To avoid NaNs at start END DO ! minimum Richardson number is 0 - ! ******************************************************************* ! No need to set Richardson number for the surface and bottom layers ! diffK @ zbar. Model do not use these levels !!!!!!! ! ******************************************************************* - - !!PS diffK(1,node,1)=diffK(2,node,1) - !!PS diffK(nlevels_nod2d(node),node,1)=diffK(nlevels_nod2d(node)-1,node,1) diffK(nzmin,node,1)=diffK(nzmin+1,node,1) diffK(nzmax,node,1)=diffK(nzmax-1,node,1) -! smooth Richardson number in the vertical using a 1-2-1 filter - !!PS IF(smooth_richardson_number .and. nlevels_nod2d(node)>2) then - IF(smooth_richardson_number .and. nzmax>2) then - DO mr=1,num_smoothings + ! smooth Richardson number in the vertical using a 1-2-1 filter + IF(smooth_Ri_ver .and. 
nzmax > 2) then + DO mr=1, num_smoothings ri_prev = 0.25_WP * diffK(1, node, 1) - !!PS DO nz=2,nlevels_nod2d(node)-1 - DO nz=nzmin+1,nzmax-1 - tmp = diffK(nz,node,1) - diffK(nz,node,1) = ri_prev + 0.5_WP * diffK(nz,node,1) + 0.25_WP * diffK(nz+1,node,1) - ri_prev = 0.25_WP * tmp + DO nz=nzmin+1, nzmax-1 + tmp = diffK(nz,node,1) + diffK(nz,node,1) = ri_prev + 0.5_WP * diffK(nz,node,1) + 0.25_WP * diffK(nz+1,node,1) + ri_prev = 0.25_WP * tmp END DO END DO END IF END DO - - if (smooth_Ri) then - call smooth_nod(diffK(:,:,1), 3, mesh) - end if - +!$OMP END PARALLEL DO + IF (smooth_Ri_hor) then + call smooth_nod(diffK(:,:,1), 3, partit, mesh) + END IF +!$OMP BARRIER !___________________________________________________________________________ ! compute viscA and diffK - do node=1, myDim_nod2D !+eDim_nod2D - nzmin = ulevels_nod2D(node) - nzmax = nlevels_nod2D(node) - !!PS do nz=2,nlevels_nod2d(node)-1 - do nz=nzmin+1,nzmax-1 - !___________________________________________________________________ - ! evaluate function of Ri# for shear instability eqn. (28b&c) - Rigg = AMAX1( diffK(nz,node,1) , 0.0_WP) - ratio = AMIN1( Rigg/Riinfty , 1.0_WP ) - frit = (1.0_WP - ratio*ratio) - frit = frit*frit*frit - !___________________________________________________________________ - ! viscosity - viscA(nz,node) = visc_sh_limit * frit + A_ver ! A_ver= 1.e-4 Vertical harm. visc. +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, Rigg, ratio, frit, Kv0_b) + do node=1, myDim_nod2D + nzmin = ulevels_nod2D(node) + nzmax = nlevels_nod2D(node) + do nz=nzmin+1, nzmax-1 + !___________________________________________________________________ + ! evaluate function of Ri# for shear instability eqn. (28b&c) + Rigg = AMAX1( diffK(nz,node,1) , 0.0_WP) + ratio = AMIN1( Rigg/Riinfty , 1.0_WP ) + frit = (1.0_WP - ratio*ratio) + frit = frit*frit*frit + !___________________________________________________________________ + ! viscosity + viscA(nz,node) = visc_sh_limit * frit + A_ver ! 
A_ver= 1.e-4 Vertical harm. visc. - !___________________________________________________________________ - ! diffusivity - ! set constant background diffusivity with namelist value K_ver - if(Kv0_const) then - diffK(nz,node,1) = diff_sh_limit * frit + K_ver - - ! set latitudinal and depth dependent background diffusivity after Qiangs - ! FESOM1.4 approach - else - ! --> see in oce_ale_mixing_pp.F90 --> there are different - ! schemes of the vertical background diffusivity possible strongly - ! depending on purpos and tuning especially with arctic focus - call Kv0_background_qiang(Kv0_b,geo_coord_nod2D(2,node)/rad,abs(zbar_3d_n(nz,node))) - diffK(nz,node,1) = diff_sh_limit * frit + Kv0_b - end if - diffK(nz,node,2) = diffK(nz,node,1) - end do ! --> do nz=2,nlevels_nod2d(node)-1 + !___________________________________________________________________ + ! diffusivity + ! set constant background diffusivity with namelist value K_ver + if (Kv0_const) then + diffK(nz,node,1) = diff_sh_limit * frit + K_ver + ! set latitudinal and depth dependent background diffusivity after Qiangs + ! FESOM1.4 approach + else + ! --> see in oce_ale_mixing_pp.F90 --> there are different + ! schemes of the vertical background diffusivity possible strongly + ! depending on purpos and tuning especially with arctic focus + call Kv0_background_qiang(Kv0_b,geo_coord_nod2D(2,node)/rad,abs(zbar_3d_n(nz,node))) + diffK(nz,node,1) = diff_sh_limit * frit + Kv0_b + end if + diffK(nz,node,2) = diffK(nz,node,1) + end do ! --> do nz=2,nlevels_nod2d(node)-1 !_______________________________________________________________________ !!! No need to set surface and bottom diffusivity. diffK @ zbar !!! !!! Model do not use these levels !!!!!!! !!! 
- !!PS viscA( 1, node ) = viscA(2, node ) - !!PS diffK( 1, node, 1 ) = diffK(2, node, 1) - !!PS diffK( 1, node, 2 ) = diffK(2, node, 2) - viscA( nzmin, node ) = viscA(nzmin+1, node ) - diffK( nzmin, node, 1 ) = diffK(nzmin+1, node, 1) - diffK( nzmin, node, 2 ) = diffK(nzmin+1, node, 2) - !!PS viscA( nlevels_nod2d(node), node ) = viscA( nlevels_nod2d(node)-1, node ) - !!PS diffK( nlevels_nod2d(node), node, 1 ) = diffK( nlevels_nod2d(node)-1, node, 1 ) - !!PS diffK( nlevels_nod2d(node), node, 2 ) = diffK( nlevels_nod2d(node)-1, node, 2 ) + viscA( nzmin, node ) = viscA( nzmin+1, node ) + diffK( nzmin, node, 1 ) = diffK( nzmin+1, node, 1 ) + diffK( nzmin, node, 2 ) = diffK( nzmin+1, node, 2 ) + viscA( nzmax, node ) = viscA( nzmax-1, node ) diffK( nzmax, node, 1 ) = diffK( nzmax-1, node, 1 ) diffK( nzmax, node, 2 ) = diffK( nzmax-1, node, 2 ) end do !-->do node=1, myDim_nod2D+eDim_nod2D +!$OMP END PARALLEL DO end subroutine ri_iwmix !####################################################################### @@ -850,10 +845,12 @@ end subroutine ri_iwmix ! ! output: update diffu ! - subroutine ddmix(diffK, mesh) + subroutine ddmix(diffK, tracers, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers real(KIND=WP), parameter :: Rrho0 = 1.9_WP ! limit for double diffusive density ratio real(KIND=WP), parameter :: dsfmax = 1.e-4_WP ! (m^2/s) max diffusivity in case of salt fingering real(KIND=WP), parameter :: viscosity_molecular = 1.5e-6_WP ! (m^2/s) @@ -862,19 +859,20 @@ subroutine ddmix(diffK, mesh) real(KIND=WP) :: alphaDT, betaDS real(KIND=WP) :: diffdd, Rrho, prandtl - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D, 2), intent(inout) :: diffK ! for T and S + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D, 2), intent(inout) :: diffK ! 
for T and S -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - DO node=1, myDim_nod2D!+eDim_nod2D +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, alphaDT, betaDS, Rrho, diffdd, prandtl) + DO node=1, myDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) - !!PS DO nz=2,nlevels_nod2d(node)-1 DO nz=nzmin+1,nzmax-1 - - ! alphaDT and betaDS @Z - alphaDT = sw_alpha(nz-1,node) * tr_arr(nz-1,node,1) - betaDS = sw_beta (nz-1,node) * tr_arr(nz-1,node,2) + alphaDT = sw_alpha(nz-1,node) * tracers%data(1)%values(nz-1,node) + betaDS = sw_beta (nz-1,node) * tracers%data(2)%values(nz-1,node) IF (alphaDT > betaDS .and. betaDS > 0.0_WP) THEN @@ -885,9 +883,7 @@ subroutine ddmix(diffK, mesh) ! ******************************************************************* Rrho = MIN(alphaDT / betaDS, Rrho0) - ! diffdd = dsfmax*(1.0-((Rrho-1)/(Rrho0-1))**2)**pexp2 ! (very old code) - ! diffdd = 1.0-((Rrho-1)/(Rrho0-1))**2 ! (less old code) - diffdd = 1.0_WP -( (Rrho-1.0_WP) / (Rrho0-1.0_WP) ) ! (new code) + diffdd = 1.0_WP -( (Rrho-1.0_WP) / (Rrho0-1.0_WP) ) diffdd = dsfmax * diffdd * diffdd * diffdd diffK(nz,node,1) = diffK(nz,node,1) + 0.7_WP * diffdd ! for temperature @@ -916,17 +912,12 @@ subroutine ddmix(diffK, mesh) ! No need to set surface and bottom diffusivity. diffK @ zbar ! Model do not use these levels !!!!!!! ! 
******************************************************************* - - !!PS diffK( 1, node, 1 ) = diffK( 2, node, 1 ) - !!PS diffK( 1, node, 2 ) = diffK( 2, node, 2 ) diffK( nzmin, node, 1 ) = diffK( nzmin+1, node, 1 ) diffK( nzmin, node, 2 ) = diffK( nzmin+1, node, 2 ) - !!PS diffK( nlevels_nod2d(node), node, 1 ) = diffK( nlevels_nod2d(node)-1, node, 1 ) - !!PS diffK( nlevels_nod2d(node), node, 2 ) = diffK( nlevels_nod2d(node)-1, node, 2 ) diffK( nzmax, node, 1 ) = diffK( nzmax-1, node, 1 ) diffK( nzmax, node, 2 ) = diffK( nzmax-1, node, 2 ) - END DO +!$OMP END PARALLEL DO end subroutine ddmix !####################################################################### @@ -951,48 +942,49 @@ end subroutine ddmix ! real blmc(3d,3) = boundary layer mixing coeff.(m**2/s) ! real ghats(3d) = nonlocal scalar transport ! - subroutine blmix_kpp(viscA,diffK, mesh) + subroutine blmix_kpp(viscA,diffK, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - integer :: node, nz, kn, elem, elnodes(3), knm1, knp1, nl1, nu1 + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: node, nz, kn, knm1, knp1, nl1, nu1 real(KIND=WP) :: delhat, R, dvdzup, dvdzdn real(KIND=WP) :: viscp, difsp, diftp, visch, difsh, difth, f1 real(KIND=WP) :: sig, a1, a2, a3, Gm, Gs, Gt real(KIND=WP) :: sigma, zehat, wm, ws real(KIND=WP) :: gat1m, gat1t, gat1s, dat1m, dat1s, dat1t - real(KIND=WP) :: dthick(mesh%nl), diff_col(mesh%nl,3), diff_colE(mesh%nl) + real(KIND=WP) :: dthick(mesh%nl), diff_col(mesh%nl,3) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D ), intent(inout) :: viscA ! for momentum (nodes) - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D, 2 ), intent(inout) :: diffK ! for T and S + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D ), intent(inout) :: viscA ! for momentum (nodes) + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D, 2 ), intent(inout) :: diffK ! 
for T and S -#include "associate_mesh.h" - - blmc = 0.0_WP - -! ******************************************************************* -! Kv over the NODE -! ******************************************************************* - DO node=1, myDim_nod2D !+eDim_nod2D +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" +!$OMP PARALLEL DO + DO node=1, myDim_nod2D+eDim_nod2D + blmc (:, node, :) = 0.0_WP + END DO +!$OMP END PARALLEL DO +! ******************************************************************* +! Kv over the NODE +! ******************************************************************* +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, nz, kn, knm1, knp1, nl1, nu1, delhat, R, dvdzup, dvdzdn, viscp, difsp, diftp, visch, difsh, difth, f1, sig, & +!$OMP a1, a2, a3, Gm, Gs, Gt, sigma, zehat, wm, ws, gat1m, gat1t, gat1s, dat1m, dat1s, dat1t, dthick, diff_col) +!$OMP DO + DO node=1, myDim_nod2D nl1=nlevels_nod2d(node) nu1=ulevels_nod2d(node) - if(nl1<3) cycle ! a temporary solution + if(nl1 < 3) cycle ! a temporary solution if(nl1-nu1 < 2) cycle - ! 
level thickness - !!PS dthick(2:nl1-1)=0.5_WP*(ABS(zbar_3d_n(3:nl1,node))-ABS(zbar_3d_n(1:nl1-2,node))) - !!PS dthick(1)=dthick(2) - !!PS dthick(nu1+1:nl1-1)=0.5_WP*(ABS(zbar_3d_n(nu1+2:nl1,node))-ABS(zbar_3d_n(nu1:nl1-2,node))) - !!PS dthick(nu1)=dthick(nu1+1) - !!PS dthick(nl1)=dthick(nl1-1) dthick(nu1+1:nl1-1)=0.5_WP*(hnode(nu1:nl1-2,node)+hnode(nu1+1:nl1-1,node) ) dthick(nu1)=hnode(nu1,node)*0.5_WP dthick(nl1)=hnode(nl1-1,node)*0.5_WP - !!PS diff_col(1:nl1-1,1)=viscA(1:nl1-1,node) - !!PS diff_col(1:nl1-1,2:3)=diffK(1:nl1-1,node,:) diff_col(nu1:nl1-1,1)=viscA(nu1:nl1-1,node) diff_col(nu1:nl1-1,2:3)=diffK(nu1:nl1-1,node,:) diff_col(nl1,:)=diff_col(nl1-1,:) @@ -1007,8 +999,7 @@ subroutine blmix_kpp(viscA,diffK, mesh) kn = INT(caseA(node)+epsln) *(kbl(node) -1) + & (1-INT(caseA(node)+epsln)) * kbl(node) - kn = min(kn,nl1-1) - !!PS knm1 = MAX(kn-1,1) + kn = MIN(kn,nl1-1) knm1 = MAX(kn-1,nu1) knp1 = MIN(kn+1,nl1) @@ -1016,8 +1007,6 @@ subroutine blmix_kpp(viscA,diffK, mesh) ! Find the interior viscosities and derivatives at hbl(i) ! eqn. (18) ! ******************************************************************* - -!!PS delhat = ABS(Z(kn))-hbl(node) delhat = ABS(Z_3d_n(kn,node))-hbl(node) R = 1.0_WP - delhat / dthick(kn) @@ -1064,8 +1053,6 @@ subroutine blmix_kpp(viscA,diffK, mesh) ! ******************************************************************* ! Compute turbulent velocity scales on the interfaces ! 
******************************************************************* - -!!PS sig = ABS(Z(nz)) / (hbl(node)+epsln) sig = ABS(Z_3d_n(nz,node)) / (hbl(node)+epsln) sigma = stable(node) * sig & + (1.0_WP - stable(node)) * AMIN1(sig, epsilon_kpp) @@ -1124,8 +1111,9 @@ subroutine blmix_kpp(viscA,diffK, mesh) dkm1(node,1) = hbl(node) * wm * sig * (1.0_WP + sig * Gm) dkm1(node,2) = hbl(node) * ws * sig * (1.0_WP + sig * Gt) dkm1(node,3) = hbl(node) * ws * sig * (1.0_WP + sig * Gs) - END DO +!$OMP END DO +!$OMP END PARALLEL end subroutine blmix_kpp !####################################################################### @@ -1145,18 +1133,22 @@ end subroutine blmix_kpp ! output ! real blmc(n3,3) = enhanced boundary layer mixing coefficient ! - subroutine enhance(viscA, diffK, mesh) + subroutine enhance(viscA, diffK, partit, mesh) IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - real(KIND=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D), intent(inout) :: viscA !for momentum (nodes) - real(kind=WP), dimension(mesh%nl, myDim_nod2D+eDim_nod2D,2), intent(inout) :: diffK !for T and S - + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(KIND=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D), intent(inout) :: viscA !for momentum (nodes) + real(kind=WP), dimension(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D,2), intent(inout) :: diffK !for T and S integer :: nz, node, k real(kind=WP) :: delta, dkmp5, dstar -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - DO node=1, myDim_nod2D !+eDim_nod2D +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(nz, node, k, delta, dkmp5, dstar) + DO node=1, myDim_nod2D k = kbl(node) - 1 delta = (hbl(node) + zbar_3d_n(k,node)) / (zbar_3d_n(k,node) - zbar_3d_n(k+1,node)) @@ -1184,6 +1176,6 @@ subroutine enhance(viscA, diffK, mesh) ghats(k,node) = (1.0_WP-caseA(node)) * ghats(k,node) 
! plot ghats END DO +!$OMP END PARALLEL DO end subroutine enhance - END MODULE o_mixing_KPP_mod diff --git a/src/oce_ale_mixing_pp.F90 b/src/oce_ale_mixing_pp.F90 index 982882c08..e816dcbad 100644 --- a/src/oce_ale_mixing_pp.F90 +++ b/src/oce_ale_mixing_pp.F90 @@ -1,5 +1,5 @@ !======================================================================= -subroutine oce_mixing_pp(mesh) +subroutine oce_mixing_pp(dynamics, partit, mesh) ! Compute Richardson number dependent Av and Kv following ! Pacanowski and Philander, 1981 ! Av = Avmax * factor**2 + Av0, @@ -15,19 +15,28 @@ subroutine oce_mixing_pp(mesh) ! SD no if in Kv computations (only minor differences are introduced) ! ! -use MOD_MESH +USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP +USE MOD_DYN USE o_PARAM USE o_ARRAYS -USE g_PARSUP USE g_config -use i_arrays IMPLICIT NONE -type(t_mesh), intent(in) , target :: mesh -real(kind=WP) :: dz_inv, bv, shear, a, rho_up, rho_dn, t, s, Kv0_b -integer :: node, nz, nzmax, nzmin, elem, elnodes(3), i +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +type(t_dyn), intent(inout), target :: dynamics +real(kind=WP) :: dz_inv, bv, shear, a, rho_up, rho_dn, t, s, Kv0_b +integer :: node, nz, nzmax, nzmin, elem, elnodes(3), i +real(kind=WP), dimension(:,:,:), pointer :: UVnode +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" +UVnode=>dynamics%uvnode(:,:,:) + -#include "associate_mesh.h" !___________________________________________________________________________ do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2d(node) @@ -38,8 +47,8 @@ subroutine oce_mixing_pp(mesh) !!PS do nz=2,nlevels_nod2d(node)-1 do nz=nzmin+1,nzmax-1 dz_inv=1.0_WP/(Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) - shear = (Unode(1,nz-1,node)-Unode(1,nz,node))**2 +& - (Unode(2,nz-1,node)-Unode(2,nz,node))**2 + shear = (UVnode(1,nz-1,node)-UVnode(1,nz,node))**2 +& + 
(UVnode(2,nz-1,node)-UVnode(2,nz,node))**2 shear = shear*dz_inv*dz_inv Kv(nz,node) = shear/(shear+5._WP*max(bvfreq(nz,node),0.0_WP)+1.0e-14) ! To avoid NaNs at start end do diff --git a/src/oce_ale_pressure_bv.F90 b/src/oce_ale_pressure_bv.F90 index 0feb5cd0a..633c2f965 100644 --- a/src/oce_ale_pressure_bv.F90 +++ b/src/oce_ale_pressure_bv.F90 @@ -1,162 +1,273 @@ module densityJM_components_interface interface - subroutine densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + subroutine densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) USE MOD_MESH - real(kind=WP), intent(IN) :: t,s - real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rhopot - type(t_mesh), intent(in) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: t,s + real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rhopot end subroutine end interface end module module density_linear_interface interface - subroutine density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, mesh) + subroutine density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, partit, mesh) USE MOD_MESH - real(kind=WP), intent(IN) :: t,s - real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rho_out - type(t_mesh), intent(in) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: t,s + real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rho_out end subroutine end interface end module module pressure_force_4_linfs_fullcell_interface interface - subroutine pressure_force_4_linfs_fullcell(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_linfs_fullcell(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), 
target :: partit end subroutine end interface end module module pressure_force_4_linfs_nemo_interface interface - subroutine pressure_force_4_linfs_nemo(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module module pressure_force_4_linfs_shchepetkin_interface interface - subroutine pressure_force_4_linfs_shchepetkin(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module pressure_force_4_linfs_easypgf_interface interface - subroutine pressure_force_4_linfs_easypgf(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + type(t_tracer), intent(in), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh end subroutine end interface end module module pressure_force_4_linfs_cubicspline_interface interface - subroutine pressure_force_4_linfs_cubicspline(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_linfs_cubicspline(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module pressure_force_4_linfs_cavity_interface interface - subroutine pressure_force_4_linfs_cavity(mesh) - use mod_mesh - type(t_mesh), intent(in) 
, target :: mesh + subroutine pressure_force_4_linfs_cavity(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module pressure_force_4_zxxxx_shchepetkin_interface interface - subroutine pressure_force_4_zxxxx_shchepetkin(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module pressure_force_4_zxxxx_easypgf_interface interface - subroutine pressure_force_4_zxxxx_easypgf(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module module pressure_force_4_zxxxx_cubicspline_interface interface - subroutine pressure_force_4_zxxxx_cubicspline(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module init_ref_density_interface interface - subroutine init_ref_density(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine init_ref_density(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module +module insitu2pot_interface + interface + 
subroutine insitu2pot(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + end subroutine + end interface +end module +module pressure_bv_interface + interface + subroutine pressure_bv(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + end subroutine + end interface +end module +module pressure_force_4_linfs_interface + interface + subroutine pressure_force_4_linfs(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + end subroutine + end interface +end module +module pressure_force_4_zxxxx_interface + interface + subroutine pressure_force_4_zxxxx(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers end subroutine end interface end module ! ! !=============================================================================== -subroutine pressure_bv(mesh) +subroutine pressure_bv(tracers, partit, mesh) ! fill in the hydrostatic pressure and the Brunt-Vaisala frequency ! in a single pass the using split form of the equation of state ! 
as proposed by NR use g_config USE o_PARAM USE MOD_MESH + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS - USE g_PARSUP - use i_arrays + USE g_support USE o_mixing_KPP_mod, only: dbsfc USE diagnostics, only: ldiag_dMOC use densityJM_components_interface use density_linear_interface IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: dz_inv, bv, a, rho_up, rho_dn, t, s - integer :: node, nz, nl1, nzmax, nzmin - real(kind=WP) :: rhopot(mesh%nl), bulk_0(mesh%nl), bulk_pz(mesh%nl), bulk_pz2(mesh%nl), rho(mesh%nl), dbsfc1(mesh%nl), db_max - real(kind=WP) :: bulk_up, bulk_dn, smallvalue, buoyancy_crit, rho_surf, aux_rho, aux_rho1 - real(kind=WP) :: sigma_theta_crit=0.125_WP !kg/m3, Levitus threshold for computing MLD2 - logical :: flag1, flag2, mixing_kpp -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + real(kind=WP) :: zmean, dz_inv, bv, a, rho_up, rho_dn, t, s + integer :: node, nz, nl1, nzmax, nzmin + real(kind=WP) :: rhopot(mesh%nl), bulk_0(mesh%nl), bulk_pz(mesh%nl), bulk_pz2(mesh%nl), rho(mesh%nl), dbsfc1(mesh%nl), db_max + real(kind=WP) :: bulk_up, bulk_dn, smallvalue, buoyancy_crit, rho_surf, aux_rho, aux_rho1 + real(kind=WP) :: sigma_theta_crit=0.125_WP !kg/m3, Levitus threshold for computing MLD2 + logical :: flag1, flag2, mixing_kpp + real(kind=WP), dimension(:,:), pointer :: temp, salt +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) smallvalue=1.0e-20 buoyancy_crit=0.0003_WP - mixing_kpp = (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) ! NR Evaluate string comparison outside the loop. It is expensive. -!!PS mixing_kpp = (trim(mix_scheme)=='KPP' .or. trim(mix_scheme)=='cvmix_KPP') ! NR Evaluate string comparison outside the loop. It is expensive. 
+ mixing_kpp = (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) !___________________________________________________________________________ ! Screen salinity - a=0.0_WP + a =0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax) +!$OMP DO REDUCTION(min: a) do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) - !!PS do nz=1,nlevels_nod2d(node)-1 do nz=nzmin,nzmax-1 - a=min(a,tr_arr(nz,node,2)) + a=min(a, salt(nz,node)) enddo enddo - +!$OMP END DO +!$OMP END PARALLEL + !___________________________________________________________________________ - if(a<0.0_WP) then + ! model explodes, no OpenMP parallelization ! + if( a < 0.0_WP ) then write (*,*)' --> pressure_bv: s<0 happens!', a pe_status=1 do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) - !!PS do nz=1, nlevels_nod2d(node)-1 do nz=nzmin, nzmax-1 - if (tr_arr(nz, node, 2) < 0) write (*,*) 'the model blows up at n=', mylist_nod2D(node), ' ; ', 'nz=', nz + if (salt(nz, node) < 0) write (*,*) 'the model blows up at n=', mylist_nod2D(node), ' ; ', 'nz=', nz end do end do endif !___________________________________________________________________________ + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(zmean, dz_inv, bv, a, rho_up, rho_dn, t, s, node, nz, nl1, nzmax, nzmin, & +!$OMP rhopot, bulk_0, bulk_pz, bulk_pz2, rho, dbsfc1, db_max, bulk_up, bulk_dn, & +!$OMP rho_surf, aux_rho, aux_rho1, flag1, flag2) +!$OMP DO do node=1, myDim_nod2D+eDim_nod2D nzmin = ulevels_nod2D(node) nzmax = nlevels_nod2D(node) @@ -175,16 +286,16 @@ subroutine pressure_bv(mesh) !_______________________________________________________________________ ! 
apply equation of state do nz=nzmin, nzmax-1 - t=tr_arr(nz, node,1) - s=tr_arr(nz, node,2) + t=temp(nz, node) + s=salt(nz, node) select case(state_equation) case(0) - call density_linear(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), mesh) + call density_linear(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case(1) - call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), mesh) + call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select end do @@ -238,17 +349,17 @@ subroutine pressure_bv(mesh) ! like at the cavity-ocean interface --> compute water mass density that ! is replaced by the cavity if (nzmin>1) then - t=tr_arr(nzmin, node,1) - s=tr_arr(nzmin, node,2) + t=temp(nzmin, node) + s=salt(nzmin, node) do nz=1, nzmin-1 select case(state_equation) case(0) - call density_linear(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), mesh) + call density_linear(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case(1) - call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), mesh) + call densityJM_components(t, s, bulk_0(nz), bulk_pz(nz), bulk_pz2(nz), rhopot(nz), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select !_______________________________________________________________ rho(nz)= bulk_0(nz) + Z_3d_n(nz,node)*(bulk_pz(nz) + Z_3d_n(nz,node)*bulk_pz2(nz)) @@ -313,24 +424,21 @@ subroutine pressure_bv(mesh) flag1=.true. flag2=.true. 
do nz=nzmin+1,nzmax-1 - bulk_up = bulk_0(nz-1) + zbar_3d_n(nz,node)*(bulk_pz(nz-1) + zbar_3d_n(nz,node)*bulk_pz2(nz-1)) - bulk_dn = bulk_0(nz) + zbar_3d_n(nz,node)*(bulk_pz(nz) + zbar_3d_n(nz,node)*bulk_pz2(nz)) - rho_up = bulk_up*rhopot(nz-1) / (bulk_up + 0.1_WP*zbar_3d_n(nz,node)*real(state_equation)) - rho_dn = bulk_dn*rhopot(nz) / (bulk_dn + 0.1_WP*zbar_3d_n(nz,node)*real(state_equation)) - dz_inv=1.0_WP/(Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) - + zmean = 0.5_WP*sum(Z_3d_n(nz-1:nz, node)) + bulk_up = bulk_0(nz-1) + zmean*(bulk_pz(nz-1) + zmean*bulk_pz2(nz-1)) + bulk_dn = bulk_0(nz) + zmean*(bulk_pz(nz) + zmean*bulk_pz2(nz)) + rho_up = bulk_up*rhopot(nz-1) / (bulk_up + 0.1_WP*zmean*real(state_equation)) + rho_dn = bulk_dn*rhopot(nz) / (bulk_dn + 0.1_WP*zmean*real(state_equation)) + dz_inv = 1.0_WP/(Z_3d_n(nz-1,node)-Z_3d_n(nz,node)) !_______________________________________________________________ ! squared brunt väisälä frequence N^2 --> N^2>0 stratification is ! stable, vertical elongated parcel is accelaratedtowards ! initial point --> does oscillation with frequency N. ! N^2<0 stratification is unstable vertical elongated parcel is ! accelerated away from initial point - bvfreq(nz,node) = -g*dz_inv*(rho_up-rho_dn)/density_0 -!!PS bvfreq(nz,node) = -g*dz_inv*(rho_up-rho_dn)/density_ref(nz,node) - + bvfreq(nz,node) = -g*dz_inv*(rho_up-rho_dn)/density_0 !!PS !--> Why not like this ? - !!PS bvfreq(nz,node) = -g*dz_inv*(rho_up-rho_dn)/(rho_dn) - + !!PS bvfreq(nz,node) = -g*dz_inv*(rho_up-rho_dn)/(rho_dn) !_______________________________________________________________ ! define MLD following Large et al. 1997 ! MLD is the shallowest depth where the local buoyancy gradient matches the maximum buoyancy gradient @@ -359,19 +467,25 @@ subroutine pressure_bv(mesh) ! The mixed layer depth ! mixlay_depth ! bv_ref + !_______________________________________________________________________ + ! BV is defined on full levels except for the first and the last ones. 
end do - !_______________________________________________________________________ - ! BV is defined on full levels except for the first and the last ones. +!$OMP END DO +!$OMP END PARALLEL +call smooth_nod (bvfreq, 1, partit, mesh) +!$OMP BARRIER end subroutine pressure_bv ! ! ! !=============================================================================== ! Calculate pressure gradient force (PGF) for linear free surface case -subroutine pressure_force_4_linfs(mesh) +subroutine pressure_force_4_linfs(tracers, partit, mesh) use g_config - use g_PARSUP use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer use pressure_force_4_linfs_fullcell_interface use pressure_force_4_linfs_nemo_interface use pressure_force_4_linfs_shchepetkin_interface @@ -379,19 +493,24 @@ subroutine pressure_force_4_linfs(mesh) use pressure_force_4_linfs_cavity_interface use pressure_force_4_linfs_easypgf_interface implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + real(kind=WP), dimension(:,:), pointer :: temp, salt + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ ! calculate pressure gradient force (PGF) for linfs with full cells if ( .not. use_partial_cell .and. .not. use_cavity_partial_cell) then - call pressure_force_4_linfs_fullcell(mesh) + call pressure_force_4_linfs_fullcell(partit, mesh) elseif (use_cavity .and. 
use_cavity_partial_cell ) then if (trim(which_pgf)=='sergey') then - call pressure_force_4_linfs_cavity(mesh) + call pressure_force_4_linfs_cavity(partit, mesh) elseif (trim(which_pgf)=='shchepetkin') then - call pressure_force_4_linfs_shchepetkin(mesh) + call pressure_force_4_linfs_shchepetkin(partit, mesh) elseif (trim(which_pgf)=='easypgf') then - call pressure_force_4_linfs_easypgf(mesh) + call pressure_force_4_linfs_easypgf(tracers, partit, mesh) else write(*,*) '________________________________________________________' write(*,*) ' --> ERROR: the choosen form of pressure gradient ' @@ -399,20 +518,20 @@ subroutine pressure_force_4_linfs(mesh) write(*,*) ' see in namelist.oce --> which_pgf = sergey, ' write(*,*) ' shchepetkin, easypgf ' write(*,*) '________________________________________________________' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end if !___________________________________________________________________________ ! calculate pressure gradient force (PGF) for linfs with partiall cells else ! --> (trim(which_ale)=='linfs' .and. 
use_partial_cell ) if (trim(which_pgf)=='nemo') then - call pressure_force_4_linfs_nemo(mesh) + call pressure_force_4_linfs_nemo(tracers, partit, mesh) elseif (trim(which_pgf)=='shchepetkin') then - call pressure_force_4_linfs_shchepetkin(mesh) + call pressure_force_4_linfs_shchepetkin(partit, mesh) elseif (trim(which_pgf)=='cubicspline') then - call pressure_force_4_linfs_cubicspline(mesh) + call pressure_force_4_linfs_cubicspline(partit, mesh) elseif (trim(which_pgf)=='easypgf') then - call pressure_force_4_linfs_easypgf(mesh) + call pressure_force_4_linfs_easypgf(tracers, partit, mesh) else write(*,*) '________________________________________________________' write(*,*) ' --> ERROR: the choosen form of pressure gradient ' @@ -420,7 +539,7 @@ subroutine pressure_force_4_linfs(mesh) write(*,*) ' see in namelist.oce --> which_pgf = nemo, ' write(*,*) ' shchepetkin, cubicspline ' write(*,*) '________________________________________________________' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end if end if end subroutine pressure_force_4_linfs @@ -429,21 +548,25 @@ end subroutine pressure_force_4_linfs ! !=============================================================================== ! calculate pressure gradient force for linfs in case full cells -subroutine pressure_force_4_linfs_fullcell(mesh) +subroutine pressure_force_4_linfs_fullcell(partit, mesh) use o_PARAM use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS - use g_PARSUP use g_config implicit none - - integer :: elem, elnodes(3), nle, ule, nlz - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nlz -#include "associate_mesh.h" - +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! 
loop over triangular elemments +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz) do elem=1, myDim_elem2D !_______________________________________________________________________ ! number of levels at elem @@ -463,6 +586,7 @@ subroutine pressure_force_4_linfs_fullcell(mesh) pgf_y(nlz,elem) = sum(gradient_sca(4:6,elem)*hpressure(nlz,elnodes)/density_0) end do end do !-->do elem=1, myDim_elem2D +!$OMP END PARALLEL DO end subroutine pressure_force_4_linfs_fullcell ! ! @@ -476,26 +600,41 @@ end subroutine pressure_force_4_linfs_fullcell ! Calculate pressure gradient force (PGF) like in NEMO based on NEMO ocean engine ! Gurvan Madec, and the NEMO team gurvan.madec@locean-ipsl.umpc.fr, nemo st@locean-ipsl.umpc.fr ! November 2015, – version 3.6 stable – -subroutine pressure_force_4_linfs_nemo(mesh) +subroutine pressure_force_4_linfs_nemo(tracers, partit, mesh) use o_PARAM use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER use o_ARRAYS - use g_PARSUP use g_config use densityJM_components_interface use density_linear_interface implicit none - - logical :: do_interpTS=.true. - integer :: elem, elnodes(3), nle, ule, nlz, nln(3), uln(3), ni, nlc, nlce - real(kind=WP) :: hpress_n_bottom(3) - real(kind=WP) :: interp_n_dens(3), interp_n_temp, interp_n_salt, & - dZn, dZn_i, dh, dval, mean_e_rho,dZn_rho_grad(2) - real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + logical :: do_interpTS=.true. 
+ integer :: elem, elnodes(3), nle, ule, nlz, nln(3), uln(3), ni, nlc, nlce + real(kind=WP) :: hpress_n_bottom(3) + real(kind=WP) :: interp_n_dens(3), interp_n_temp, interp_n_salt, & + dZn, dZn_i, dh, dval, mean_e_rho,dZn_rho_grad(2) + real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2 + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) + real(kind=WP), dimension(:,:), pointer :: temp, salt +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, nln, uln, ni, nlc, nlce, hpress_n_bottom, & +!$OMP interp_n_dens, interp_n_temp, interp_n_salt, dZn, dZn_i, dh, dval, & +!$OMP mean_e_rho, dZn_rho_grad, rhopot, bulk_0, bulk_pz, bulk_pz2, & +!$OMP zbar_n, z_n) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -589,22 +728,22 @@ subroutine pressure_force_4_linfs_nemo(mesh) !! state ... !else ! ... interpolate temperature and saltinity ... - dval = tr_arr(nlc, elnodes(ni),1) - tr_arr(nlc-1,elnodes(ni),1) - interp_n_temp = tr_arr(nlc-1,elnodes(ni),1) + (dval/dZn*dZn_i) - dval = tr_arr(nlc ,elnodes(ni),2) - tr_arr(nlc-1,elnodes(ni),2) - interp_n_salt = tr_arr(nlc-1,elnodes(ni),2) + (dval/dZn*dZn_i) + dval = temp(nlc, elnodes(ni)) - temp(nlc-1,elnodes(ni)) + interp_n_temp = temp(nlc-1,elnodes(ni)) + (dval/dZn*dZn_i) + dval = salt(nlc ,elnodes(ni)) - salt(nlc-1,elnodes(ni)) + interp_n_salt = salt(nlc-1,elnodes(ni)) + (dval/dZn*dZn_i) ! calculate density at element mid-depth bottom depth via ! equation of state from linear interpolated temperature and ! 
salinity select case(state_equation) case(0) - call density_linear(interp_n_temp, interp_n_salt, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + call density_linear(interp_n_temp, interp_n_salt, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) case(1) - call densityJM_components(interp_n_temp, interp_n_salt, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + call densityJM_components(interp_n_temp, interp_n_salt, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select interp_n_dens(ni) = bulk_0 + Z_n(nle)*(bulk_pz + Z_n(nle)*bulk_pz2) !!PS interp_n_dens(ni) = interp_n_dens(ni)*rhopot/(interp_n_dens(ni)+0.1_WP*Z_n(nle))*real(state_equation))-density_0 @@ -632,6 +771,8 @@ subroutine pressure_force_4_linfs_nemo(mesh) pgf_y(nle,elem) = sum(gradient_sca(4:6,elem)*hpress_n_bottom)/density_0 end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_linfs_nemo ! ! @@ -644,21 +785,29 @@ end subroutine pressure_force_4_linfs_nemo ! --> based on density jacobian method ... ! calculate PGF for linfs with partiell cell on/off ! First coded by P. 
Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_linfs_shchepetkin(mesh) +subroutine pressure_force_4_linfs_shchepetkin(partit, mesh) use o_PARAM use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS - use g_PARSUP use g_config implicit none - - integer :: elem, elnodes(3), nle, ule, nlz, idx(3),ni - real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum - real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nlz, idx(3), ni + real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum + real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, idx, ni, int_dp_dx, drho_dx, dz_dx, aux_sum, & +!$OMP dx10, dx20, dx21, df10, df21, drho_dz, zbar_n, z_n) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -888,6 +1037,8 @@ subroutine pressure_force_4_linfs_shchepetkin(mesh) pgf_y(nlz,elem) = int_dp_dx(2) + aux_sum*0.5_WP end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_linfs_shchepetkin ! ! @@ -895,29 +1046,43 @@ end subroutine pressure_force_4_linfs_shchepetkin !=============================================================================== ! Calculate pressure gradient force (PGF) ! First coded by P. 
Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_linfs_easypgf(mesh) +subroutine pressure_force_4_linfs_easypgf(tracers, partit, mesh) use o_PARAM use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER use o_ARRAYS - use g_PARSUP use g_config use densityJM_components_interface use density_linear_interface implicit none - - integer :: elem, elnodes(3), nle, ule, nlz, idx(3),ni - real(kind=WP) :: int_dp_dx(2), drho_dx, aux_sum - real(kind=WP) :: dx10(3), dx20(3), dx21(3) - real(kind=WP) :: t0(3), dt10(3), dt21(3) - real(kind=WP) :: s0(3), ds10(3), ds21(3) - real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref - real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) - real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 - - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + integer :: elem, elnodes(3), nle, ule, nlz, idx(3),ni + real(kind=WP) :: int_dp_dx(2), drho_dx, aux_sum + real(kind=WP) :: dx10(3), dx20(3), dx21(3) + real(kind=WP) :: t0(3), dt10(3), dt21(3) + real(kind=WP) :: s0(3), ds10(3), ds21(3) + real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref + real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) + real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) + real(kind=WP), dimension(:,:), pointer :: temp, salt +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) + !___________________________________________________________________________ ! 
loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, idx, ni, int_dp_dx, drho_dx, aux_sum, dx10, dx20, dx21, t0, dt10, dt21, s0, ds10, ds21, & +!$OMP rho_at_Zn, temp_at_Zn, salt_at_Zn, drho_dz, aux_dref, rhopot, bulk_0, bulk_pz, bulk_pz2, dref_rhopot, dref_bulk_0, & +!$OMP dref_bulk_pz, dref_bulk_pz2, zbar_n, z_n ) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -945,12 +1110,12 @@ subroutine pressure_force_4_linfs_easypgf(mesh) if (use_cavity .and. .not. use_density_ref) then select case(state_equation) case(0) - call density_linear(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, mesh) + call density_linear(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case(1) - call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, mesh) + call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' 
- call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select end if @@ -1014,13 +1179,13 @@ subroutine pressure_force_4_linfs_easypgf(mesh) dx21(ni) = Z_3d_n(nlz+2,elnodes(ni))-Z_3d_n(nlz+1,elnodes(ni)) dx20(ni) = Z_3d_n(nlz+2,elnodes(ni))-Z_3d_n(nlz ,elnodes(ni)) - t0(ni) = tr_arr(nlz ,elnodes(ni),1) - dt10(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+2,elnodes(ni),1)-tr_arr(nlz+1,elnodes(ni),1) + t0(ni) = temp(nlz ,elnodes(ni)) + dt10(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) + dt21(ni) = temp(nlz+2,elnodes(ni))-temp(nlz+1,elnodes(ni)) - s0(ni) = tr_arr(nlz ,elnodes(ni),2) - ds10(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+2,elnodes(ni),2)-tr_arr(nlz+1,elnodes(ni),2) + s0(ni) = salt(nlz ,elnodes(ni)) + ds10(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) + ds21(ni) = salt(nlz+2,elnodes(ni))-salt(nlz+1,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -1035,12 +1200,12 @@ subroutine pressure_force_4_linfs_easypgf(mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' 
- call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1053,13 +1218,13 @@ subroutine pressure_force_4_linfs_easypgf(mesh) dx21(ni) = Z_3d_n(nlz+1,elnodes(ni))-Z_3d_n(nlz ,elnodes(ni)) dx20(ni) = Z_3d_n(nlz+1,elnodes(ni))-Z_3d_n(nlz-1,elnodes(ni)) - t0(ni) = tr_arr(nlz-1,elnodes(ni),1) - dt10(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) + t0(ni) = temp(nlz-1,elnodes(ni)) + dt10(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) + dt21(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) - s0(ni) = tr_arr(nlz-1,elnodes(ni),2) - ds10(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) + s0(ni) = salt(nlz-1,elnodes(ni)) + ds10(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) + ds21(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -1074,12 +1239,12 @@ subroutine pressure_force_4_linfs_easypgf(mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. 
Check your namelists.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1156,14 +1321,14 @@ subroutine pressure_force_4_linfs_easypgf(mesh) dx21(ni) = Z_3d_n(nlz ,elnodes(ni))-Z_3d_n(nlz-1,elnodes(ni)) dx20(ni) = Z_3d_n(nlz ,elnodes(ni))-Z_3d_n(nlz-2,elnodes(ni)) - t0(ni) = tr_arr(nlz-2,elnodes(ni),1) - dt10(ni) = tr_arr(nlz-1,elnodes(ni),1)-tr_arr(nlz-2,elnodes(ni),1) - dt21(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) + t0(ni) = temp(nlz-2,elnodes(ni)) + dt10(ni) = temp(nlz-1,elnodes(ni))-temp(nlz-2,elnodes(ni)) + dt21(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) - s0(ni) = tr_arr(nlz-2,elnodes(ni),2) - ds10(ni) = tr_arr(nlz-1,elnodes(ni),2)-tr_arr(nlz-2,elnodes(ni),2) - ds21(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) - !___________________________________________________________________ + s0(ni) = salt(nlz-2,elnodes(ni)) + ds10(ni) = salt(nlz-1,elnodes(ni))-salt(nlz-2,elnodes(ni)) + ds21(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) + !_________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & + dt10(ni)/dx10(ni)*(Z_n(nlz)-Z_3d_n(nlz-2,elnodes(ni))) & @@ -1177,12 +1342,12 @@ subroutine pressure_force_4_linfs_easypgf(mesh) ! 
compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1195,13 +1360,13 @@ subroutine pressure_force_4_linfs_easypgf(mesh) dx21(ni) = Z_3d_n(nlz+1,elnodes(ni))-Z_3d_n(nlz ,elnodes(ni)) dx20(ni) = Z_3d_n(nlz+1,elnodes(ni))-Z_3d_n(nlz-1,elnodes(ni)) - t0(ni) = tr_arr(nlz-1,elnodes(ni),1) - dt10(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) + t0(ni) = temp(nlz-1,elnodes(ni)) + dt10(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) + dt21(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) - s0(ni) = tr_arr(nlz-1,elnodes(ni),2) - ds10(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) + s0(ni) = salt(nlz-1,elnodes(ni)) + ds10(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) + ds21(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) !___________________________________________________________________ ! 
interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -1216,12 +1381,12 @@ subroutine pressure_force_4_linfs_easypgf(mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -1242,6 +1407,8 @@ subroutine pressure_force_4_linfs_easypgf(mesh) pgf_y(nlz,elem) = int_dp_dx(2) + aux_sum*0.5_WP end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_linfs_easypgf ! ! @@ -1249,25 +1416,34 @@ end subroutine pressure_force_4_linfs_easypgf !=============================================================================== ! Calculate pressure gradient force (PGF) via cubicspline used in FEOSM1.4 ! First coded by Q. Wang for FESOM1.4, adapted by P. 
Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_linfs_cubicspline(mesh) +subroutine pressure_force_4_linfs_cubicspline(partit, mesh) use o_PARAM use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS - use g_PARSUP use g_config implicit none - - integer :: elem, elnodes(3), nle, ule, nlz, nlc, ni, node, nln(3), uln(3), dd - real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, drho_dz, auxp - real(kind=WP) :: dx10, dx20, dx21, df10, df21 - real(kind=WP) :: interp_n_dens(3) - integer :: s_ind(4) - real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, s_dup, s_dlo - real(kind=WP) :: a, b, c, d, dz - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nlz, nlc, ni, node, nln(3), uln(3), dd + real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, drho_dz, auxp + real(kind=WP) :: dx10, dx20, dx21, df10, df21 + real(kind=WP) :: interp_n_dens(3) + integer :: s_ind(4) + real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, s_dup, s_dlo + real(kind=WP) :: a, b, c, d, dz + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, nlc, ni, node, nln, uln, dd, int_dp_dx, drho_dx, dz_dx, drho_dz, auxp, & +!$OMP dx10, dx20, dx21, df10, df21, interp_n_dens, s_ind, s_z, s_dens, s_H, aux1, aux2, s_dup, s_dlo, & +!$OMP a, b, c, d, dz, zbar_n, z_n ) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -1441,6 +1617,8 @@ subroutine pressure_force_4_linfs_cubicspline(mesh) int_dp_dx(2) = int_dp_dx(2) + auxp end do ! 
--> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_linfs_cubicspline ! ! @@ -1448,23 +1626,31 @@ end subroutine pressure_force_4_linfs_cubicspline !=============================================================================== ! calculate pressure gradient force for linfs in case cavities are used with ! surface partial cells or bottom partial cells -subroutine pressure_force_4_linfs_cavity(mesh) +subroutine pressure_force_4_linfs_cavity(partit, mesh) use o_PARAM use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS - use g_PARSUP use g_config implicit none - - integer :: elem, elnodes(3), nle, ule, nlz, idx(3), ni - real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum - real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nlz, idx(3), ni + real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, aux_sum + real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3), drho_dz(3) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, idx, ni, int_dp_dx, drho_dx, dz_dx, aux_sum, & +!$OMP dx10, dx20, dx21, df10, df21, drho_dz, zbar_n, z_n ) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! number of levels at elem @@ -1652,28 +1838,34 @@ subroutine pressure_force_4_linfs_cavity(mesh) end if end do !-->do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_linfs_cavity ! ! ! 
!=============================================================================== ! Calculate pressure gradient force (PGF) for full free surface case zlevel and zstar -subroutine pressure_force_4_zxxxx(mesh) - use g_PARSUP - use g_config +subroutine pressure_force_4_zxxxx(tracers, partit, mesh) use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use g_config use pressure_force_4_zxxxx_shchepetkin_interface use pressure_force_4_zxxxx_cubicspline_interface use pressure_force_4_zxxxx_easypgf_interface implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers !___________________________________________________________________________ if (trim(which_pgf)=='shchepetkin') then - call pressure_force_4_zxxxx_shchepetkin(mesh) + call pressure_force_4_zxxxx_shchepetkin(partit, mesh) elseif (trim(which_pgf)=='cubicspline') then - call pressure_force_4_zxxxx_cubicspline(mesh) + call pressure_force_4_zxxxx_cubicspline(partit, mesh) elseif (trim(which_pgf)=='easypgf' ) then - call pressure_force_4_zxxxx_easypgf(mesh) + call pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) else write(*,*) '________________________________________________________' write(*,*) ' --> ERROR: the choosen form of pressure gradient ' @@ -1682,8 +1874,8 @@ subroutine pressure_force_4_zxxxx(mesh) write(*,*) ' see in namelist.oce --> which_pgf = ' write(*,*) ' shchepetkin, cubicspline, easypgf ' write(*,*) '________________________________________________________' - call par_ex(1) - end if + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) + end if end subroutine pressure_force_4_zxxxx ! ! @@ -1694,24 +1886,32 @@ end subroutine pressure_force_4_zxxxx ! interpolation. ! First coded by Q. Wang for FESOM1.4, adapted by P. Scholz for FESOM2.0 ! 
26.04.2018 -subroutine pressure_force_4_zxxxx_cubicspline(mesh) +subroutine pressure_force_4_zxxxx_cubicspline(partit, mesh) use o_PARAM use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS - use g_PARSUP use g_config implicit none - - integer :: elem, elnodes(3), nle, ule, nln(3), uln(3), nlz, nlc,dd - integer :: ni, node, dens_ind,kk - real(kind=WP) :: ze - integer :: s_ind(4) - real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, aux(2), s_dup, s_dlo - real(kind=WP) :: a, b, c, d, dz, rho_n(3), rhograd_e(2), p_grad(2) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nln(3), uln(3), nlz, nlc, dd + integer :: ni, node, dens_ind, kk + real(kind=WP) :: ze + integer :: s_ind(4) + real(kind=WP) :: s_z(4), s_dens(4), s_H, aux1, aux2, aux(2), s_dup, s_dlo + real(kind=WP) :: a, b, c, d, dz, rho_n(3), rhograd_e(2), p_grad(2) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nln, uln, nlz, nlc, dd, ni, node, dens_ind, kk, ze, s_ind, s_z, s_dens, s_H, & +!$OMP aux1, aux2, aux, s_dup, s_dlo, a, b, c, d, dz, rho_n, rhograd_e, p_grad, zbar_n, z_n ) +!$OMP DO do elem=1, myDim_elem2D ule = ulevels(elem) nle = nlevels(elem)-1 @@ -1863,6 +2063,8 @@ subroutine pressure_force_4_zxxxx_cubicspline(mesh) end do ! --> do nlz=1,nle end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_zxxxx_cubicspline ! ! @@ -1875,25 +2077,33 @@ end subroutine pressure_force_4_zxxxx_cubicspline ! --> based on density jacobian method ... ! calculate PGF for linfs with partiell cell on/off ! 
First coded by P. Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_zxxxx_shchepetkin(mesh) +subroutine pressure_force_4_zxxxx_shchepetkin(partit, mesh) use o_PARAM use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_ARRAYS - use g_PARSUP use g_config use densityJM_components_interface use density_linear_interface implicit none - - integer :: elem, elnodes(3), nle,ule, nlz, nln(3), ni, nlc, nlce, idx(3) - real(kind=WP) :: int_dp_dx(2), drho_dx, drho_dy, drho_dz(3), dz_dx, dz_dy, aux_sum - real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3) - real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: elem, elnodes(3), nle, ule, nlz, nln(3), ni, nlc, nlce, idx(3) + real(kind=WP) :: int_dp_dx(2), drho_dx, drho_dy, drho_dz(3), dz_dx, dz_dy, aux_sum + real(kind=WP) :: dx10(3), dx20(3), dx21(3), df10(3), df21(3) + real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, nln, ni, nlc, nlce, idx, int_dp_dx, drho_dx, drho_dy, drho_dz, dz_dx, dz_dy, aux_sum, & +!$OMP dx10, dx20, dx21, df10, df21, rhopot, bulk_0, bulk_pz, bulk_pz2, zbar_n, z_n) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -2100,7 +2310,8 @@ subroutine pressure_force_4_zxxxx_shchepetkin(mesh) pgf_y(nlz,elem) = int_dp_dx(2) + aux_sum*0.5_WP end do ! 
--> do elem=1, myDim_elem2D - +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_zxxxx_shchepetkin ! ! @@ -2113,29 +2324,43 @@ end subroutine pressure_force_4_zxxxx_shchepetkin ! --> based on density jacobian method ... ! calculate PGF for linfs with partiell cell on/off ! First coded by P. Scholz for FESOM2.0, 08.02.2019 -subroutine pressure_force_4_zxxxx_easypgf(mesh) +subroutine pressure_force_4_zxxxx_easypgf(tracers, partit, mesh) use o_PARAM use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER use o_ARRAYS - use g_PARSUP use g_config use densityJM_components_interface use density_linear_interface implicit none - - integer :: elem, elnodes(3), nle,ule, nlz, nln(3), ni, nlc, nlce, idx(3) - real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, drho_dy, dz_dy,aux_sum - real(kind=WP) :: dx10(3), dx20(3), dx21(3) - real(kind=WP) :: f0(3), df10(3), df21(3) - real(kind=WP) :: t0(3), dt10(3), dt21(3) - real(kind=WP) :: s0(3), ds10(3), ds21(3) - real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref - real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) - real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + integer :: elem, elnodes(3), nle, ule, nlz, nln(3), ni, nlc, nlce, idx(3) + real(kind=WP) :: int_dp_dx(2), drho_dx, dz_dx, drho_dy, dz_dy, aux_sum + real(kind=WP) :: dx10(3), dx20(3), dx21(3) + real(kind=WP) :: f0(3), df10(3), df21(3) + real(kind=WP) :: t0(3), dt10(3), dt21(3) + real(kind=WP) :: s0(3), ds10(3), ds21(3) + real(kind=WP) :: rho_at_Zn(3), temp_at_Zn(3), salt_at_Zn(3), drho_dz(3), aux_dref + real(kind=WP) :: rhopot(3), bulk_0(3), bulk_pz(3), bulk_pz2(3) + real(kind=WP) :: dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2 + real(kind=WP) :: zbar_n(mesh%nl), 
z_n(mesh%nl-1) + real(kind=WP), dimension(:,:), pointer :: temp, salt +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) !___________________________________________________________________________ ! loop over triangular elemments +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(elem, elnodes, nle, ule, nlz, nln, ni, nlc, nlce, idx, int_dp_dx, drho_dx, drho_dy, dz_dx, dz_dy, aux_sum, dx10, dx20, dx21, & +!$OMP f0, df10, df21, t0, dt10, dt21, s0, ds10, ds21, rho_at_Zn, temp_at_Zn, salt_at_Zn, drho_dz, aux_dref, rhopot, & +!$OMP bulk_0, bulk_pz, bulk_pz2, dref_rhopot, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, zbar_n, z_n ) +!$OMP DO do elem=1, myDim_elem2D !_______________________________________________________________________ ! nle...number of mid-depth levels at elem @@ -2163,12 +2388,12 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) if (use_cavity .and. .not. use_density_ref) then select case(state_equation) case(0) - call density_linear(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, mesh) + call density_linear(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case(1) - call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, mesh) + call densityJM_components(density_ref_T, density_ref_S, dref_bulk_0, dref_bulk_pz, dref_bulk_pz2, dref_rhopot, partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' 
- call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select end if @@ -2221,13 +2446,13 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) !!PS df10(ni) = density_m_rho0(nlz+1,elnodes(ni))-density_m_rho0(nlz ,elnodes(ni)) !!PS df21(ni) = density_m_rho0(nlz+2,elnodes(ni))-density_m_rho0(nlz+1,elnodes(ni)) - t0(ni) = tr_arr(nlz ,elnodes(ni),1) - dt10(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+2,elnodes(ni),1)-tr_arr(nlz+1,elnodes(ni),1) + t0(ni) = temp(nlz ,elnodes(ni)) + dt10(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) + dt21(ni) = temp(nlz+2,elnodes(ni))-temp(nlz+1,elnodes(ni)) - s0(ni) = tr_arr(nlz ,elnodes(ni),2) - ds10(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+2,elnodes(ni),2)-tr_arr(nlz+1,elnodes(ni),2) + s0(ni) = salt(nlz ,elnodes(ni)) + ds10(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) + ds21(ni) = salt(nlz+2,elnodes(ni))-salt(nlz+1,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -2242,12 +2467,12 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' 
- call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2270,13 +2495,13 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) !!PS df10(ni) = density_m_rho0(nlz ,elnodes(ni))-density_m_rho0(nlz-1,elnodes(ni)) !!PS df21(ni) = density_m_rho0(nlz+1,elnodes(ni))-density_m_rho0(nlz ,elnodes(ni)) - t0(ni) = tr_arr(nlz-1,elnodes(ni),1) - dt10(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) + t0(ni) = temp(nlz-1,elnodes(ni)) + dt10(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) + dt21(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) - s0(ni) = tr_arr(nlz-1,elnodes(ni),2) - ds10(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) + s0(ni) = salt(nlz-1,elnodes(ni)) + ds10(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) + ds21(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -2291,12 +2516,12 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) ! 
compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2310,7 +2535,7 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) ! --> this is not wanted !!! 
write(*,*) ' --> would do second order surface boundary density extrapolation' write(*,*) ' This is not wanted, model stops here' - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if end do !_______________________________________________________________________ @@ -2348,13 +2573,13 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) !!PS df10 = density_m_rho0(nlz ,elnodes)-density_m_rho0(nlz-1,elnodes) !!PS df21 = density_m_rho0(nlz+1,elnodes)-density_m_rho0(nlz ,elnodes) - t0 = tr_arr(nlz-1,elnodes,1) - dt10 = tr_arr(nlz ,elnodes,1)-tr_arr(nlz-1,elnodes,1) - dt21 = tr_arr(nlz+1,elnodes,1)-tr_arr(nlz ,elnodes,1) + t0 = temp(nlz-1,elnodes) + dt10 = temp(nlz ,elnodes)-temp(nlz-1,elnodes) + dt21 = temp(nlz+1,elnodes)-temp(nlz ,elnodes) - s0 = tr_arr(nlz-1,elnodes,2) - ds10 = tr_arr(nlz ,elnodes,2)-tr_arr(nlz-1,elnodes,2) - ds21 = tr_arr(nlz+1,elnodes,2)-tr_arr(nlz ,elnodes,2) + s0 = salt(nlz-1,elnodes) + ds10 = salt(nlz ,elnodes)-salt(nlz-1,elnodes) + ds21 = salt(nlz+1,elnodes)-salt(nlz ,elnodes) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn = t0 & @@ -2371,16 +2596,16 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) ! 
compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(1), salt_at_Zn(1), bulk_0(1), bulk_pz(1), bulk_pz2(1), rhopot(1), mesh) - call density_linear(temp_at_Zn(2), salt_at_Zn(2), bulk_0(2), bulk_pz(2), bulk_pz2(2), rhopot(2), mesh) - call density_linear(temp_at_Zn(3), salt_at_Zn(3), bulk_0(3), bulk_pz(3), bulk_pz2(3), rhopot(3), mesh) + call density_linear(temp_at_Zn(1), salt_at_Zn(1), bulk_0(1), bulk_pz(1), bulk_pz2(1), rhopot(1), partit, mesh) + call density_linear(temp_at_Zn(2), salt_at_Zn(2), bulk_0(2), bulk_pz(2), bulk_pz2(2), rhopot(2), partit, mesh) + call density_linear(temp_at_Zn(3), salt_at_Zn(3), bulk_0(3), bulk_pz(3), bulk_pz2(3), rhopot(3), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(1), salt_at_Zn(1), bulk_0(1), bulk_pz(1), bulk_pz2(1), rhopot(1), mesh) - call densityJM_components(temp_at_Zn(2), salt_at_Zn(2), bulk_0(2), bulk_pz(2), bulk_pz2(2), rhopot(2), mesh) - call densityJM_components(temp_at_Zn(3), salt_at_Zn(3), bulk_0(3), bulk_pz(3), bulk_pz2(3), rhopot(3), mesh) + call densityJM_components(temp_at_Zn(1), salt_at_Zn(1), bulk_0(1), bulk_pz(1), bulk_pz2(1), rhopot(1), partit, mesh) + call densityJM_components(temp_at_Zn(2), salt_at_Zn(2), bulk_0(2), bulk_pz(2), bulk_pz2(2), rhopot(2), partit, mesh) + call densityJM_components(temp_at_Zn(3), salt_at_Zn(3), bulk_0(3), bulk_pz(3), bulk_pz2(3), rhopot(3), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' 
- call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn = bulk_0 + Z_n(nlz)*(bulk_pz + Z_n(nlz)*bulk_pz2) rho_at_Zn = rho_at_Zn*rhopot/(rho_at_Zn+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2435,13 +2660,13 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) !!PS df10(ni) = density_m_rho0(nlz-1,elnodes(ni))-density_m_rho0(nlz-2,elnodes(ni)) !!PS df21(ni) = density_m_rho0(nlz ,elnodes(ni))-density_m_rho0(nlz-1,elnodes(ni)) - t0(ni) = tr_arr(nlz-2,elnodes(ni),1) - dt10(ni) = tr_arr(nlz-1,elnodes(ni),1)-tr_arr(nlz-2,elnodes(ni),1) - dt21(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) + t0(ni) = temp(nlz-2,elnodes(ni)) + dt10(ni) = temp(nlz-1,elnodes(ni))-temp(nlz-2,elnodes(ni)) + dt21(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) - s0(ni) = tr_arr(nlz-2,elnodes(ni),2) - ds10(ni) = tr_arr(nlz-1,elnodes(ni),2)-tr_arr(nlz-2,elnodes(ni),2) - ds21(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) + s0(ni) = salt(nlz-2,elnodes(ni)) + ds10(ni) = salt(nlz-1,elnodes(ni))-salt(nlz-2,elnodes(ni)) + ds21(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -2456,12 +2681,12 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) ! compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. 
Check your namelists.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2484,13 +2709,13 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) !!PS df10 = density_m_rho0(nlz ,elnodes)-density_m_rho0(nlz-1,elnodes) !!PS df21 = density_m_rho0(nlz+1,elnodes)-density_m_rho0(nlz ,elnodes) - t0(ni) = tr_arr(nlz-1,elnodes(ni),1) - dt10(ni) = tr_arr(nlz ,elnodes(ni),1)-tr_arr(nlz-1,elnodes(ni),1) - dt21(ni) = tr_arr(nlz+1,elnodes(ni),1)-tr_arr(nlz ,elnodes(ni),1) + t0(ni) = temp(nlz-1,elnodes(ni)) + dt10(ni) = temp(nlz ,elnodes(ni))-temp(nlz-1,elnodes(ni)) + dt21(ni) = temp(nlz+1,elnodes(ni))-temp(nlz ,elnodes(ni)) - s0(ni) = tr_arr(nlz-1,elnodes(ni),2) - ds10(ni) = tr_arr(nlz ,elnodes(ni),2)-tr_arr(nlz-1,elnodes(ni),2) - ds21(ni) = tr_arr(nlz+1,elnodes(ni),2)-tr_arr(nlz ,elnodes(ni),2) + s0(ni) = salt(nlz-1,elnodes(ni)) + ds10(ni) = salt(nlz ,elnodes(ni))-salt(nlz-1,elnodes(ni)) + ds21(ni) = salt(nlz+1,elnodes(ni))-salt(nlz ,elnodes(ni)) !___________________________________________________________________ ! interpoalte vertice temp and salinity to elemental level Z_n temp_at_Zn(ni) = t0(ni) & @@ -2505,12 +2730,12 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) ! 
compute density from state equation select case(state_equation) case(0) - call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call density_linear(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case(1) - call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), mesh) + call densityJM_components(temp_at_Zn(ni), salt_at_Zn(ni), bulk_0(ni), bulk_pz(ni), bulk_pz2(ni), rhopot(ni), partit, mesh) case default !unknown if (mype==0) write(*,*) 'Wrong type of the equation of state. Check your namelists.' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) end select rho_at_Zn(ni) = bulk_0(ni) + Z_n(nlz)*(bulk_pz(ni) + Z_n(nlz)*bulk_pz2(ni)) rho_at_Zn(ni) = rho_at_Zn(ni)*rhopot(ni)/(rho_at_Zn(ni)+0.1_WP*Z_n(nlz)*real(state_equation))-aux_dref @@ -2527,7 +2752,7 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) write(*,*) ' idx = ', idx write(*,*) ' nle = ', nle write(*,*) ' nln = ', nlevels_nod2D(elnodes)-1 - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if end do !_______________________________________________________________________ @@ -2543,16 +2768,19 @@ subroutine pressure_force_4_zxxxx_easypgf(mesh) pgf_y(nlz,elem) = int_dp_dx(2) + aux_sum*0.5_WP end do ! --> do elem=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine pressure_force_4_zxxxx_easypgf ! ! ! !=============================================================================== -SUBROUTINE densityJM_local(t, s, pz, rho_out, mesh) +SUBROUTINE densityJM_local(t, s, pz, rho_out, partit, mesh) USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM -use g_PARSUP !, only: par_ex,pe_status use densityJM_components_interface IMPLICIT NONE @@ -2564,16 +2792,19 @@ SUBROUTINE densityJM_local(t, s, pz, rho_out, mesh) ! - has been derived from the SPEM subroutine rhocal ! 
!--------------------------------------------------------------------------- - - real(kind=WP), intent(IN) :: t,s,pz - real(kind=WP), intent(OUT) :: rho_out - real(kind=WP) :: rhopot, bulk - real(kind=WP) :: bulk_0, bulk_pz, bulk_pz2 - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: t,s,pz + real(kind=WP), intent(OUT) :: rho_out + real(kind=WP) :: rhopot, bulk + real(kind=WP) :: bulk_0, bulk_pz, bulk_pz2 +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !compute secant bulk modulus - call densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + call densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) bulk = bulk_0 + pz*(bulk_pz + pz*bulk_pz2) @@ -2583,11 +2814,12 @@ end subroutine densityJM_local ! ! !=============================================================================== -SUBROUTINE densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) +SUBROUTINE densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM -use g_PARSUP !, only: par_ex,pe_status IMPLICIT NONE ! @@ -2601,9 +2833,11 @@ SUBROUTINE densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) !--------------------------------------------------------------------------- ! N. 
Rakowski 2014 the split form !--------------------------------------------------------------------------- - real(kind=WP), intent(IN) :: t,s - real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rhopot - real(kind=WP) :: s_sqrt + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: t,s + real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2, rhopot + real(kind=WP) :: s_sqrt real(kind=WP), parameter :: a0 = 19092.56, at = 209.8925 real(kind=WP), parameter :: at2 = -3.041638, at3 = -1.852732e-3 @@ -2629,9 +2863,11 @@ SUBROUTINE densityJM_components(t, s, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) real(kind=WP), parameter :: bst4 = 5.38750e-9 real(kind=WP), parameter :: bss = -5.72466e-3, bsst = 1.02270e-4 real(kind=WP), parameter :: bsst2 = -1.65460e-6,bss2 = 4.8314e-4 - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !compute secant bulk modulus @@ -2733,7 +2969,7 @@ end function atg ! ! !=============================================================================== -subroutine sw_alpha_beta(TF1,SF1, mesh) +subroutine sw_alpha_beta(TF1,SF1, partit, mesh) ! DESCRIPTION: ! A function to calculate the thermal expansion coefficient ! and saline contraction coefficient. (elementwise) @@ -2758,20 +2994,27 @@ subroutine sw_alpha_beta(TF1,SF1, mesh) ! a_over_b=0.34765 psu*C^-1 @ S=40.0psu, ptmp=10.0C, p=4000db !----------------------------------------------------------------- use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP use o_arrays - use g_parsup use o_param use g_comm_auto implicit none ! 
- type(t_mesh), intent(in) , target :: mesh - integer :: n, nz, nzmin, nzmax - real(kind=WP) :: t1,t1_2,t1_3,t1_4,p1,p1_2,p1_3,s1,s35,s35_2 - real(kind=WP) :: a_over_b - real(kind=WP) :: TF1(mesh%nl-1, myDim_nod2D+eDim_nod2D),SF1(mesh%nl-1, myDim_nod2D+eDim_nod2D) + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: n, nz, nzmin, nzmax + real(kind=WP) :: t1, t1_2, t1_3, t1_4, p1, p1_2, p1_3, s1, s35, s35_2 + real(kind=WP) :: a_over_b + real(kind=WP) :: TF1(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D),SF1(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, t1, t1_2, t1_3, t1_4, p1, p1_2, p1_3, s1, s35, s35_2, a_over_b) +!$OMP DO do n = 1,myDim_nod2d nzmin = ulevels_nod2d(n) nzmax = nlevels_nod2d(n) @@ -2816,14 +3059,17 @@ subroutine sw_alpha_beta(TF1,SF1, mesh) sw_alpha(nz,n) = a_over_b*sw_beta(nz,n) end do end do -call exchange_nod(sw_alpha) -call exchange_nod(sw_beta) +!$OMP END DO +!$OMP END PARALLEL +call exchange_nod(sw_alpha, partit) +call exchange_nod(sw_beta, partit) +!$OMP BARRIER end subroutine sw_alpha_beta ! ! ! !=============================================================================== -subroutine compute_sigma_xy(TF1,SF1, mesh) +subroutine compute_sigma_xy(TF1,SF1, partit, mesh) !-------------------------------------------------------------------- ! DESCRIPTION: ! computes density gradient @@ -2832,24 +3078,31 @@ subroutine compute_sigma_xy(TF1,SF1, mesh) ! SF = salinity [psu (PSS-78)] ! TF = potential temperature [degree C (ITS-90)] ! - ! OUTPUT: + ! OUTPUT:7 ! based on thermal expansion and saline contraction coefficients ! 
computes density gradient sigma_xy !------------------------------------------------------------------- use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP use o_param use o_arrays - use g_parsup use g_comm_auto implicit none ! - type(t_mesh), intent(in) , target :: mesh - real(kind=WP), intent(IN) :: TF1(mesh%nl-1, myDim_nod2D+eDim_nod2D), SF1(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP) :: tx(mesh%nl-1), ty(mesh%nl-1), sx(mesh%nl-1), sy(mesh%nl-1), vol(mesh%nl-1), testino(2) - integer :: n, nz, elnodes(3),el, k, nln, uln, nle, ule + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: TF1(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D), SF1(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: tx(mesh%nl-1), ty(mesh%nl-1), sx(mesh%nl-1), sy(mesh%nl-1), vol(mesh%nl-1), testino(2) + integer :: n, nz, elnodes(3),el, k, nln, uln, nle, ule -#include "associate_mesh.h" - ! +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(tx, ty, sx, sy, vol, testino, n, nz, elnodes, el, k, nln, uln, nle, ule) +!$OMP DO DO n=1, myDim_nod2D nln = nlevels_nod2D(n)-1 uln = ulevels_nod2D(n) @@ -2895,38 +3148,46 @@ subroutine compute_sigma_xy(TF1,SF1, mesh) sigma_xy(1,uln:nln,n) = (-sw_alpha(uln:nln,n)*tx(uln:nln)+sw_beta(uln:nln,n)*sx(uln:nln))/vol(uln:nln)*density_0 sigma_xy(2,uln:nln,n) = (-sw_alpha(uln:nln,n)*ty(uln:nln)+sw_beta(uln:nln,n)*sy(uln:nln))/vol(uln:nln)*density_0 END DO - - call exchange_nod(sigma_xy) +!$OMP END DO +!$OMP END PARALLEL + call exchange_nod(sigma_xy, partit) +!$OMP BARRIER end subroutine compute_sigma_xy ! ! ! 
!=============================================================================== -subroutine compute_neutral_slope(mesh) +subroutine compute_neutral_slope(partit, mesh) use o_ARRAYS - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP use MOD_MESH use o_param use g_config use g_comm_auto IMPLICIT NONE - real(kind=WP) :: deltaX1,deltaY1,deltaX2,deltaY2 - integer :: edge - integer :: n,nz,nl1,ul1,el(2),elnodes(3),enodes(2) - real(kind=WP) :: c, ro_z_inv,eps,S_cr,S_d - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: edge + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 + integer :: n, nz, nl1, ul1, el(2), elnodes(3), enodes(2) + real(kind=WP) :: c, ro_z_inv, eps, S_cr, S_d -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !if sigma_xy is not computed eps=5.0e-6_WP S_cr=1.0e-2_WP S_d=1.0e-3_WP - slope_tapered=0._WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, n, nz, nl1, ul1, el, elnodes, enodes, c, ro_z_inv) +!$OMP DO do n=1, myDim_nod2D + slope_tapered(: , :, n)=0._WP nl1=nlevels_nod2d(n)-1 ul1=ulevels_nod2d(n) - !!PS do nz = 2,nl1 - do nz = ul1+1,nl1 + do nz = ul1+1, nl1 ro_z_inv=2._WP*g/density_0/max(bvfreq(nz,n)+bvfreq(nz+1,n), eps**2) !without minus, because neutral slope S=-(nabla\rho)/(d\rho/dz) neutral_slope(1,nz,n)=sigma_xy(1,nz,n)*ro_z_inv neutral_slope(2,nz,n)=sigma_xy(2,nz,n)*ro_z_inv @@ -2936,43 +3197,52 @@ subroutine compute_neutral_slope(mesh) c=0.5_WP*(1.0_WP + tanh((S_cr - neutral_slope(3,nz,n))/S_d)) if ((bvfreq(nz,n) <= 0.0_WP) .or. (bvfreq(nz+1,n) <= 0.0_WP)) c=0.0_WP slope_tapered(:,nz,n)=neutral_slope(:,nz,n)*c -! slope_tapered(:,nl1-1:nl1,n)=0. -! slope_tapered(:,1:2,n) =0. 
enddo enddo - - call exchange_nod(neutral_slope) - call exchange_nod(slope_tapered) +!$OMP END DO +!$OMP END PARALLEL + call exchange_nod(neutral_slope, partit) + call exchange_nod(slope_tapered, partit) +!$OMP BARRIER end subroutine compute_neutral_slope ! ! ! !=============================================================================== !converts insitu temperature to a potential one -! tr_arr(:,:,1) will be modified! -subroutine insitu2pot(mesh) +! tracers%data(1)%values will be modified! +subroutine insitu2pot(tracers, partit, mesh) use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer use o_param use o_arrays use g_config - use g_PARSUP implicit none - real(kind=WP), external :: ptheta - real(kind=WP) :: pp, pr, tt, ss - integer :: n, nz, nzmin,nzmax - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + real(kind=WP), external :: ptheta + real(kind=WP) :: pp, pr, tt, ss + integer :: n, nz, nzmin, nzmax + real(kind=WP), dimension(:,:), pointer :: temp, salt +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + temp=>tracers%data(1)%values(:,:) + salt=>tracers%data(2)%values(:,:) ! 
Convert in situ temperature into potential temperature pr=0.0_WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, tt, ss, pp) do n=1,myDim_nod2d+eDim_nod2D nzmin = ulevels_nod2D(n) nzmax = nlevels_nod2D(n) !!PS do nz=1, nlevels_nod2D(n)-1 do nz=nzmin, nzmax-1 - tt=tr_arr(nz,n,1) - ss=tr_arr(nz,n,2) + tt=temp(nz,n) + ss=salt(nz,n) !!PS ___________________________________________________________________ !!PS using here Z_3d_n at the beginning makes the model very instable after @@ -2981,29 +3251,34 @@ subroutine insitu2pot(mesh) !!PS anyway do a spinup and it its only used at initialisation time !!PS pp=abs(Z_3d_n(nz,n)) pp=abs(Z(nz)) - tr_arr(nz,n,1)=ptheta(ss, tt, pp, pr) + temp(nz,n)=ptheta(ss, tt, pp, pr) end do end do +!$OMP END PARALLEL DO end subroutine insitu2pot ! ! ! !=============================================================================== -SUBROUTINE density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, mesh) +SUBROUTINE density_linear(t, s, bulk_0, bulk_pz, bulk_pz2, rho_out, partit, mesh) !coded by Margarita Smolentseva, 21.05.2020 USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP !, only: par_ex,pe_status USE o_ARRAYS USE o_PARAM -use g_PARSUP !, only: par_ex,pe_status use g_config !, only: which_toy, toy_ocean IMPLICIT NONE - - real(kind=WP), intent(IN) :: t,s - real(kind=WP), intent(OUT) :: rho_out - real(kind=WP) :: rhopot, bulk - real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2 - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP), intent(IN) :: t,s + real(kind=WP), intent(OUT) :: rho_out + real(kind=WP) :: rhopot, bulk + real(kind=WP), intent(OUT) :: bulk_0, bulk_pz, bulk_pz2 +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !compute secant bulk modulus bulk_0 = 1 @@ -3021,42 +3296,42 @@ end subroutine 
density_linear ! ! !=============================================================================== -subroutine init_ref_density(mesh) +subroutine init_ref_density(partit, mesh) ! compute reference density ! Coded by Qiang Wang ! Reviewed by ?? !___________________________________________________________________________ USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM use o_ARRAYS - use g_PARSUP use densityJM_components_interface implicit none !___________________________________________________________________________ - type(t_mesh), intent(in) , target :: mesh - integer :: node, nz, nzmin, nzmax - real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2, rho - real(kind=8) :: T, S, auxz - -#include "associate_mesh.h" + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: node, nz, nzmin, nzmax + real(kind=WP) :: rhopot, bulk_0, bulk_pz, bulk_pz2, rho + real(kind=8) :: T, S, auxz +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ -!!PS S=34. 
-!!PS T=2.0 - - !___________________________________________________________________________ - density_ref = 0.0_WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax, rhopot, bulk_0, bulk_pz, bulk_pz2, rho, T, S, auxz) do node=1,myDim_nod2d+eDim_nod2d - !!PS nzmin = ulevels_nod2d(node) + density_ref(:, node) = 0.0_WP nzmin = 1 nzmax = nlevels_nod2d(node)-1 auxz=min(0.0,Z_3d_n(nzmin,node)) !_______________________________________________________________________ - call densityJM_components(density_ref_T, density_ref_S, bulk_0, bulk_pz, bulk_pz2, rhopot, mesh) + call densityJM_components(density_ref_T, density_ref_S, bulk_0, bulk_pz, bulk_pz2, rhopot, partit, mesh) rho = bulk_0 + auxz*bulk_pz + auxz*bulk_pz2 - density_ref(nzmin,node) = rho*rhopot/(rho+0.1_WP*auxz) + density_ref(nzmin, node) = rho*rhopot/(rho+0.1_WP*auxz) !_______________________________________________________________________ do nz=nzmin+1,nzmax @@ -3065,6 +3340,7 @@ subroutine init_ref_density(mesh) density_ref(nz,node) = rho*rhopot/(rho+0.1_WP*auxz) end do end do +!$OMP END PARALLEL DO if(mype==0) write(*,*) ' --> compute reference density' end subroutine init_ref_density diff --git a/src/oce_ale_tracer.F90 b/src/oce_ale_tracer.F90 index dd8d8411f..7a0bc6d3a 100644 --- a/src/oce_ale_tracer.F90 +++ b/src/oce_ale_tracer.F90 @@ -1,168 +1,261 @@ module diff_part_hor_redi_interface - interface - subroutine diff_part_hor_redi(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module -module adv_tracers_muscle_ale_interface - interface - subroutine adv_tracers_muscle_ale(ttfAB, num_ord, do_Xmoment, mesh) - use MOD_MESH - use g_PARSUP - type(t_mesh), intent(in) , target :: mesh - integer :: do_Xmoment - real(kind=WP) :: ttfAB(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP) :: num_ord - end subroutine - end interface -end module -module adv_tracers_vert_ppm_ale_interface - interface - subroutine adv_tracers_vert_ppm_ale(ttf, do_Xmoment, 
mesh) - use MOD_MESH - use g_PARSUP - type(t_mesh), intent(in) , target :: mesh - integer :: do_Xmoment - real(kind=WP) :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - end subroutine - end interface -end module -module adv_tracers_ale_interface - interface - subroutine adv_tracers_ale(tr_num, mesh) - use mod_mesh - integer :: tr_num - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface + interface + subroutine diff_part_hor_redi(tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module + module diff_ver_part_expl_ale_interface - interface - subroutine diff_ver_part_expl_ale(tr_num, mesh) - use MOD_MESH - type(t_mesh), intent(in) , target :: mesh - integer :: tr_num - end subroutine - end interface + interface + subroutine diff_ver_part_expl_ale(tr_num, tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + integer , intent(in) , target :: tr_num + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module + module diff_ver_part_redi_expl_interface - interface - subroutine diff_ver_part_redi_expl(mesh) - use MOD_MESH - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface + interface + subroutine diff_ver_part_redi_expl(tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module + module diff_ver_part_impl_ale_interface - interface - subroutine diff_ver_part_impl_ale(tr_num, mesh) - use MOD_MESH - type(t_mesh), intent(in) , target :: mesh 
- integer :: tr_num - end subroutine - end interface + interface + subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + integer , intent(in) , target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module + module diff_tracers_ale_interface - interface - subroutine diff_tracers_ale(tr_num, mesh) - use mod_mesh - integer, intent(in) :: tr_num - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface + interface + subroutine diff_tracers_ale(tr_num, dynamics, tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + integer , intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module + module bc_surface_interface - interface - function bc_surface(n, id, mesh) - use mod_mesh - integer , intent(in) :: n, id - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: bc_surface - end function - end interface + interface + function bc_surface(n, id, sval, partit) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + integer , intent(in) :: n, id + type(t_partit), intent(inout), target :: partit + real(kind=WP) :: bc_surface + real(kind=WP), intent(in) :: sval + end function + end interface end module + module diff_part_bh_interface - interface - subroutine diff_part_bh(ttf, mesh) - use MOD_MESH - use g_PARSUP - type(t_mesh) , intent(in), target :: mesh - real(kind=WP), intent(inout), target :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - end subroutine - end interface + interface + subroutine diff_part_bh(tr_num, 
dynamics, tracer, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + integer , intent(in) , target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracer + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module +module solve_tracers_ale_interface + interface + subroutine solve_tracers_ale(ice, dynamics, tracers, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + USE MOD_ICE + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module ! ! !=============================================================================== ! Driving routine Here with ALE changes!!! 
-subroutine solve_tracers_ale(mesh) +subroutine solve_tracers_ale(ice, dynamics, tracers, partit, mesh) use g_config - use g_parsup - use o_PARAM, only: num_tracers, SPP, Fer_GM - use o_arrays + use o_PARAM, only: SPP, Fer_GM use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + USE MOD_ICE + use mod_tracer use g_comm_auto use o_tracers use Toy_Channel_Soufflet - use adv_tracers_ale_interface use diff_tracers_ale_interface - + use oce_adv_tra_driver_interfaces implicit none - type(t_mesh), intent(in) , target :: mesh - integer :: tr_num, node, nzmax, nzmin - real(kind=WP) :: aux_tr(mesh%nl-1,myDim_nod2D+eDim_nod2D) + type(t_ice) , intent(in) , target :: ice + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: tr_num, node, elem, nzmax, nzmin + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, fer_UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, Wvel_e, Wvel_i, fer_Wvel + real(kind=WP), dimension(:,:) , pointer :: del_ttf +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) + Wvel_e => dynamics%w_e(:,:) + Wvel_i => dynamics%w_i(:,:) + if (Fer_GM) then + fer_UV => dynamics%fer_uv(:,:,:) + fer_Wvel => dynamics%fer_w(:,:) + end if + del_ttf => tracers%work%del_ttf -#include "associate_mesh.h" !___________________________________________________________________________ - if (SPP) call cal_rejected_salt(mesh) - if (SPP) call app_rejected_salt(mesh) + if (SPP) call cal_rejected_salt(ice, partit, mesh) + if (SPP) call app_rejected_salt(tracers%data(2)%values, partit, mesh) !___________________________________________________________________________ ! update 3D velocities with the bolus velocities: ! 1. bolus velocities are computed according to GM implementation after R. Ferrari et al., 2010 ! 2. bolus velocities are used only for advecting tracers and shall be subtracted back afterwards if (Fer_GM) then - UV =UV +fer_UV - Wvel_e=Wvel_e+fer_Wvel - Wvel =Wvel +fer_Wvel +!$OMP PARALLEL DO + do elem=1, myDim_elem2D+eDim_elem2D + UV(:, :, elem) =UV(:, :, elem) + fer_UV(:, :, elem) + end do +!$OMP END PARALLEL DO +!$OMP PARALLEL DO + do node=1, myDim_nod2D+eDim_nod2D + Wvel_e(:, node)=Wvel_e(:, node)+fer_Wvel(:, node) + Wvel (:, node)=Wvel (:, node)+fer_Wvel(:, node) + end do +!$OMP END PARALLEL DO end if + !___________________________________________________________________________ ! loop over all tracers - do tr_num=1,num_tracers + do tr_num=1, tracers%num_tracers ! do tracer AB (Adams-Bashfort) interpolation only for advectiv part ! needed if (flag_debug .and. 
mype==0) print *, achar(27)//'[37m'//' --> call init_tracers_AB'//achar(27)//'[0m' - call init_tracers_AB(tr_num, mesh) + call init_tracers_AB(tr_num, tracers, partit, mesh) ! advect tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call adv_tracers_ale'//achar(27)//'[0m' - call adv_tracers_ale(tr_num, mesh) + ! it will update del_ttf with contributions from horizontal and vertical advection parts (del_ttf_advhoriz and del_ttf_advvert) + call do_oce_adv_tra(dt, UV, Wvel, Wvel_i, Wvel_e, tr_num, dynamics, tracers, partit, mesh) + !___________________________________________________________________________ + ! update array for total tracer flux del_ttf with the fluxes from horizontal + ! and vertical advection +!$OMP PARALLEL DO + do node=1, myDim_nod2d + tracers%work%del_ttf(:, node)=tracers%work%del_ttf(:, node)+tracers%work%del_ttf_advhoriz(:, node)+tracers%work%del_ttf_advvert(:, node) + end do +!$OMP END PARALLEL DO + !___________________________________________________________________________ + ! AB is not needed after the advection step. Initialize it with the current tracer before it is modified. + ! call init_tracers_AB at the beginning of this loop will compute AB for the next time step then. +!$OMP PARALLEL DO + do node=1, myDim_nod2d+eDim_nod2D + tracers%data(tr_num)%valuesAB(:, node)=tracers%data(tr_num)%values(:, node) !DS: check that this is the right place! + end do +!$OMP END PARALLEL DO ! diffuse tracers if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call diff_tracers_ale'//achar(27)//'[0m' - call diff_tracers_ale(tr_num, mesh) - + call diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) + ! relax to salt and temp climatology if (flag_debug .and. mype==0) print *, achar(27)//'[37m'//' --> call relax_to_clim'//achar(27)//'[0m' - call relax_to_clim(tr_num, mesh) - if ((toy_ocean) .AND. (TRIM(which_toy)=="soufflet")) call relax_zonal_temp(mesh) - call exchange_nod(tr_arr(:,:,tr_num)) +! if ((toy_ocean) .AND. 
((tr_num==1) .AND. (TRIM(which_toy)=="soufflet"))) then + if ((toy_ocean) .AND. ((TRIM(which_toy)=="soufflet"))) then + call relax_zonal_temp(tracers%data(1), partit, mesh) + else + call relax_to_clim(tr_num, tracers, partit, mesh) + end if + call exchange_nod(tracers%data(tr_num)%values(:,:), partit) +!$OMP BARRIER end do !___________________________________________________________________________ - do tr_num=1, ptracers_restore_total - tr_arr(:,ptracers_restore(tr_num)%ind2,ptracers_restore(tr_num)%locid)=1.0_WP + ! 3D restoring for "passive" tracers + !!!$OMPTODO: add OpenMP later, not needed right now! + do tr_num=1, ptracers_restore_total + tracers%data(ptracers_restore(tr_num)%locid)%values(:, ptracers_restore(tr_num)%ind2)=1.0_WP end do !___________________________________________________________________________ ! subtract the the bolus velocities back from 3D velocities: if (Fer_GM) then - UV =UV -fer_UV - Wvel_e=Wvel_e-fer_Wvel - Wvel =Wvel -fer_Wvel +!$OMP PARALLEL DO + do elem=1, myDim_elem2D+eDim_elem2D + UV(:, :, elem) =UV(:, :, elem) - fer_UV(:, :, elem) + end do +!$OMP END PARALLEL DO +!$OMP PARALLEL DO + do node=1, myDim_nod2D+eDim_nod2D + Wvel_e(:, node)=Wvel_e(:, node)-fer_Wvel(:, node) + Wvel (:, node)=Wvel (:, node)-fer_Wvel(:, node) + end do +!$OMP END PARALLEL DO end if !___________________________________________________________________________ @@ -170,86 +263,29 @@ subroutine solve_tracers_ale(mesh) ! --> if we do only where (tr_arr(:,:,2) < 3._WP ) we also fill up the bottom ! topogrpahy with values which are then writte into the output --> thats why ! do node=1,.... 
and tr_arr(node,1:nzmax,2) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nzmin, nzmax) do node=1,myDim_nod2D+eDim_nod2D nzmax=nlevels_nod2D(node)-1 nzmin=ulevels_nod2D(node) - !!PS where (tr_arr(1:nzmax,node,2) > 45._WP) - !!PS tr_arr(1:nzmax,node,2)=45._WP - !!PS end where - where (tr_arr(nzmin:nzmax,node,2) > 45._WP) - tr_arr(nzmin:nzmax,node,2)=45._WP + where (tracers%data(2)%values(nzmin:nzmax,node) > 45._WP) + tracers%data(2)%values(nzmin:nzmax,node)=45._WP end where - !!PS where (tr_arr(1:nzmax,node,2) < 3._WP ) - !!PS tr_arr(1:nzmax,node,2)=3._WP - !!PS end where - where (tr_arr(nzmin:nzmax,node,2) < 3._WP ) - tr_arr(nzmin:nzmax,node,2)=3._WP - end where - -!!PS if (nzmin>15 .and. mype==0) then -!!PS write(*,*) ' tr_arr(:,node,1) = ',tr_arr(:,node,1) -!!PS write(*,*) -!!PS write(*,*) ' tr_arr(:,node,2) = ',tr_arr(:,node,2) -!!PS end if + where (tracers%data(2)%values(nzmin:nzmax,node) < 3._WP ) + tracers%data(2)%values(nzmin:nzmax,node) = 3._WP + end where end do +!$OMP END PARALLEL DO end subroutine solve_tracers_ale ! ! !=============================================================================== -subroutine adv_tracers_ale(tr_num, mesh) - use g_config, only: flag_debug - use g_parsup +subroutine diff_tracers_ale(tr_num, dynamics, tracers, partit, mesh) use mod_mesh - use o_arrays - use diagnostics, only: ldiag_DVD, compute_diag_dvd_2ndmoment_klingbeil_etal_2014, & - compute_diag_dvd_2ndmoment_burchard_etal_2008, compute_diag_dvd - use adv_tracers_muscle_ale_interface - use adv_tracers_vert_ppm_ale_interface - use oce_adv_tra_driver_interfaces - implicit none - integer :: tr_num, node, nz - type(t_mesh), intent(in) , target :: mesh - ! del_ttf ... initialised and setted to zero in call init_tracers_AB(tr_num) - ! --> del_ttf ... equivalent to R_T^n in Danilov etal FESOM2: "from finite element - ! to finite volume". At the end R_T^n should contain all advection therms and - ! the terms due to diffusion. - ! 
del_ttf=0d0 - - !___________________________________________________________________________ - ! if ldiag_DVD=.true. --> compute tracer second moments for the calcualtion - ! of discret variance decay - if (ldiag_DVD .and. tr_num <= 2) then - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd_2ndmoment'//achar(27)//'[0m' - call compute_diag_dvd_2ndmoment_klingbeil_etal_2014(tr_num, mesh) - !!PS call compute_diag_dvd_2ndmoment_burchard_etal_2008(tr_num) - end if - - !___________________________________________________________________________ - ! horizontal ale tracer advection - ! here --> add horizontal advection part to del_ttf(nz,n) = del_ttf(nz,n) + ... - del_ttf_advhoriz = 0.0_WP - del_ttf_advvert = 0.0_WP - call do_oce_adv_tra(tr_arr(:,:,tr_num), tr_arr_old(:,:,tr_num), UV, wvel, wvel_i, wvel_e, 1, del_ttf_advhoriz, del_ttf_advvert, tra_adv_ph, tra_adv_pv, mesh) - !___________________________________________________________________________ - ! update array for total tracer flux del_ttf with the fluxes from horizontal - ! and vertical advection - del_ttf=del_ttf+del_ttf_advhoriz+del_ttf_advvert - - !___________________________________________________________________________ - ! compute discrete variance decay after Burchard and Rennau 2008 - if (ldiag_DVD .and. tr_num <= 2) then - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call compute_diag_dvd'//achar(27)//'[0m' - call compute_diag_dvd(tr_num, mesh) - end if - -end subroutine adv_tracers_ale -! -! 
-!=============================================================================== -subroutine diff_tracers_ale(tr_num, mesh) - use mod_mesh - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN use o_arrays use o_tracers use diff_part_hor_redi_interface @@ -258,205 +294,264 @@ subroutine diff_tracers_ale(tr_num, mesh) use diff_ver_part_impl_ale_interface use diff_part_bh_interface implicit none - - integer, intent(in) :: tr_num - integer :: n, nzmax, nzmin - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + integer , intent(in) , target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh !___________________________________________________________________________ - ! convert tr_arr_old(:,:,tr_num)=ttr_n-0.5 --> prepare to calc ttr_n+0.5 - ! eliminate AB (adams bashfort) interpolates tracer, which is only needed for - ! tracer advection. For diffusion only need tracer from previouse time step - tr_arr_old(:,:,tr_num)=tr_arr(:,:,tr_num) !DS: check that this is the right place! + integer :: n, nzmax, nzmin + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), pointer :: del_ttf(:,:) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + del_ttf => tracers%work%del_ttf !___________________________________________________________________________ ! do horizontal diffusiion ! write there also horizontal diffusion rhs to del_ttf which is equal the R_T^n ! in danilovs srcipt ! includes Redi diffusivity if Redi=.true. - call diff_part_hor_redi(mesh) ! seems to be ~9% faster than diff_part_hor + call diff_part_hor_redi(tracers, partit, mesh) ! 
seems to be ~9% faster than diff_part_hor !___________________________________________________________________________ - ! do vertical diffusion: explicite - if (.not. i_vert_diff) call diff_ver_part_expl_ale(tr_num, mesh) + ! do vertical diffusion: explicit + if (.not. tracers%data(tr_num)%i_vert_diff) call diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) ! A projection of horizontal Redi diffussivity onto vertical. This par contains horizontal ! derivatives and has to be computed explicitly! - if (Redi) call diff_ver_part_redi_expl(mesh) - + if (Redi) call diff_ver_part_redi_expl(tracers, partit, mesh) + !___________________________________________________________________________ - ! Update tracers --> calculate T* see Danilov etal "FESOM2 from finite elements - ! to finite volume" + ! Update tracers --> calculate T* see Danilov et al. (2017) ! T* = (dt*R_T^n + h^(n-0.5)*T^(n-0.5))/h^(n+0.5) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nzmin, nzmax) do n=1, myDim_nod2D nzmax=nlevels_nod2D(n)-1 nzmin=ulevels_nod2D(n) - !!PS del_ttf(1:nzmax,n)=del_ttf(1:nzmax,n)+tr_arr(1:nzmax,n,tr_num)* & - !!PS (hnode(1:nzmax,n)-hnode_new(1:nzmax,n)) - !!PS tr_arr(1:nzmax,n,tr_num)=tr_arr(1:nzmax,n,tr_num)+ & - !!PS del_ttf(1:nzmax,n)/hnode_new(1:nzmax,n) - - del_ttf(nzmin:nzmax,n)=del_ttf(nzmin:nzmax,n)+tr_arr(nzmin:nzmax,n,tr_num)* & + del_ttf(nzmin:nzmax,n)=del_ttf(nzmin:nzmax,n)+tracers%data(tr_num)%values(nzmin:nzmax,n)* & (hnode(nzmin:nzmax,n)-hnode_new(nzmin:nzmax,n)) - tr_arr(nzmin:nzmax,n,tr_num)=tr_arr(nzmin:nzmax,n,tr_num)+ & + tracers%data(tr_num)%values(nzmin:nzmax,n)=tracers%data(tr_num)%values(nzmin:nzmax,n)+ & del_ttf(nzmin:nzmax,n)/hnode_new(nzmin:nzmax,n) ! WHY NOT ??? --> whats advantage of above --> tested it --> the upper ! equation has a 30% smaller nummerical drift - !tr_arr(1:nzmax,n,tr_num)=(hnode(1:nzmax,n)*tr_arr(1:nzmax,n,tr_num)+ & - ! del_ttf(1:nzmax,n))/hnode_new(1:nzmax,n) + ! 
tr_arr(1:nzmax,n,tr_num)=(hnode(1:nzmax,n)*tr_arr(1:nzmax,n,tr_num)+ & + ! del_ttf(1:nzmax,n))/hnode_new(1:nzmax,n) end do - +!$OMP END PARALLEL DO !___________________________________________________________________________ - if (i_vert_diff) then + if (tracers%data(tr_num)%i_vert_diff) then ! do vertical diffusion: implicite - call diff_ver_part_impl_ale(tr_num, mesh) - + call diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) end if - !We DO not set del_ttf to zero because it will not be used in this timestep anymore - !init_tracers will set it to zero for the next timestep - !init_tracers will set it to zero for the next timestep - if (smooth_bh_tra) then - call diff_part_bh(tr_arr(:,:,tr_num), mesh) ! alpply biharmonic diffusion (implemented as filter) + !init_tracers_AB will set it to zero for the next timestep + if (tracers%data(tr_num)%smooth_bh_tra) then + call diff_part_bh(tr_num, dynamics, tracers, partit, mesh) ! alpply biharmonic diffusion (implemented as filter) end if end subroutine diff_tracers_ale ! ! 
!=============================================================================== !Vertical diffusive flux(explicit scheme): -subroutine diff_ver_part_expl_ale(tr_num, mesh) +subroutine diff_ver_part_expl_ale(tr_num, tracers, partit, mesh) use o_ARRAYS use g_forcing_arrays use MOD_MESH - use g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER use g_config,only: dt - implicit none - type(t_mesh), intent(in) , target :: mesh + integer , intent(in) , target :: tr_num + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, nz, nl1, ul1 real(kind=WP) :: vd_flux(mesh%nl-1) - real(kind=WP) :: rdata,flux,rlx - integer :: nz,nl1,ul1, tr_num,n - real(kind=WP) :: zinv1,Ty - -#include "associate_mesh.h" + real(kind=WP) :: rdata, flux, rlx + real(kind=WP) :: zinv1 + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), pointer :: del_ttf(:,:) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + del_ttf => tracers%work%del_ttf + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nl1, ul1, vd_flux, rdata, flux, rlx, zinv1) !___________________________________________________________________________ do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 ul1=ulevels_nod2D(n) vd_flux=0._WP - if (tr_num==1) then + if (tracers%data(tr_num)%ID==1) then flux = -heat_flux(n)/vcpw rdata = Tsurf(n) rlx = surf_relax_T - elseif (tr_num==2) then + elseif (tracers%data(tr_num)%ID==2) then flux = virtual_salt(n)+relax_salt(n)- real_salt_flux(n)*is_nonlinfs else flux = 0._WP rdata = 0._WP rlx=0._WP - endif - + endif !_______________________________________________________________________ !Surface forcing - !!PS vd_flux(1)= flux - vd_flux(ul1)= flux - - !_______________________________________________________________________ - !!PS do nz=2,nl1 + vd_flux(ul1)= flux do nz=ul1+1,nl1 !___________________________________________________________________ - zinv1=1.0_WP/(Z_3d_n(nz-1,n)-Z_3d_n(nz,n)) - - !___________________________________________________________________ -! Ty= Kd(4,nz-1,n)*(Z_3d_n(nz-1,n)-zbar_3d_n(nz,n))*zinv1 *neutral_slope(3,nz-1,n)**2 + & -! 
Kd(4,nz,n)*(zbar_3d_n(nz,n)-Z_3d_n(nz,n))*zinv1 *neutral_slope(3,nz,n)**2 - - vd_flux(nz) = (Kv(nz,n)+Ty)*(tr_arr(nz-1,n,tr_num)-tr_arr(nz,n,tr_num))*zinv1*area(nz,n) - + zinv1=1.0_WP/(Z_3d_n(nz-1,n)-Z_3d_n(nz,n)) + vd_flux(nz) = Kv(nz,n)*(tracers%data(tr_num)%values(nz-1,n)-tracers%data(tr_num)%values(nz,n))*zinv1*area(nz,n) end do - !_______________________________________________________________________ - !!PS do nz=1,nl1-1 do nz=ul1,nl1-1 - del_ttf(nz,n) = del_ttf(nz,n) + (vd_flux(nz) - vd_flux(nz+1))/(zbar_3d_n(nz,n)-zbar_3d_n(nz+1,n))*dt/area(nz,n) + del_ttf(nz,n) = del_ttf(nz,n) + (vd_flux(nz) - vd_flux(nz+1))/(zbar_3d_n(nz,n)-zbar_3d_n(nz+1,n))*dt/areasvol(nz,n) end do - del_ttf(nl1,n) = del_ttf(nl1,n) + (vd_flux(nl1)/(zbar_3d_n(nl1,n)-zbar_3d_n(nl1+1,n)))*dt/area(nl1,n) + del_ttf(nl1,n) = del_ttf(nl1,n) + (vd_flux(nl1)/(zbar_3d_n(nl1,n)-zbar_3d_n(nl1+1,n)))*dt/areasvol(nl1,n) end do ! --> do n=1, myDim_nod2D +!$OMP END PARALLEL DO end subroutine diff_ver_part_expl_ale ! ! !=============================================================================== ! 
vertical diffusivity augmented with Redi contribution [vertical flux of K(3,3)*d_zT] -subroutine diff_ver_part_impl_ale(tr_num, mesh) +subroutine diff_ver_part_impl_ale(tr_num, dynamics, tracers, partit, mesh) use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_DYN use o_PARAM - use o_ARRAYS - use i_ARRAYS - use g_PARSUP + use o_ARRAYS, only: Ki, Kv, heat_flux, water_flux, slope_tapered + USE MOD_PARTIT + USE MOD_PARSUP use g_CONFIG use g_forcing_arrays - use o_mixing_KPP_mod !for ghats _GO_ + use o_mixing_KPP_mod !for ghats _GO_ + use g_cvmix_kpp, only: kpp_nonlcltranspT, kpp_nonlcltranspS, kpp_oblmixc use bc_surface_interface - implicit none - type(t_mesh), intent(in) , target :: mesh -!!PS real(kind=WP) :: bc_surface + integer , intent(in) , target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) - integer :: nz, n, nzmax,nzmin, tr_num - real(kind=WP) :: m, zinv, dt_inv, dz - real(kind=WP) :: rsss, Ty,Ty1, c1,zinv1,zinv2,v_adv - real(kind=WP), external :: TFrez ! Sea water freeze temperature. + integer :: nz, n, nzmax, nzmin + real(kind=WP) :: m, zinv, dz + real(kind=WP) :: rsss, Ty, Ty1, c1, zinv1, zinv2, v_adv real(kind=WP) :: isredi=0._WP logical :: do_wimpl=.true. -#include "associate_mesh.h" - + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) !___________________________________________________________________________ - if ((trim(tra_adv_lim)=='FCT') .OR. (.not. w_split)) do_wimpl=.false. + ! 
pointer on necessary derived types + real(kind=WP), dimension(:,:), pointer :: trarr + real(kind=WP), dimension(:,:), pointer :: Wvel_i +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + trarr => tracers%data(tr_num)%values(:,:) + Wvel_i => dynamics%w_i(:,:) + !___________________________________________________________________________ + if ((trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') .OR. (.not. dynamics%use_wsplit)) do_wimpl=.false. if (Redi) isredi=1._WP - dt_inv=1.0_WP/dt Ty =0.0_WP Ty1 =0.0_WP ! solve equation diffusion equation implicite part: - ! --> h^(n+0.5)* (T^(n+0.5)-Tstar) = dt*( K_33*d/dz*(T^(n+0.5)-Tstar) + K_33*d/dz*Tstar ) - ! --> dTnew = T^(n+0.5)-Tstar - ! --> h^(n+0.5)* (dTnew) = dt*(K_33*d/dz*dTnew) + K_33*dt*d/dz*Tstar - ! --> h^(n+0.5)* (dTnew) = dt*(K_33*d/dz*dTnew) + RHS - ! --> solve for dT_new - ! - ! ----------- zbar_1, V_1 (Volume eq. to Area) - ! Z_1 o T_1 - ! ----------- zbar_2, V_2 - ! Z_2 o T_2 - ! ----------- zbar_3, V_3 - ! Z_3 o T_3 - ! ----------- zbar_4 - ! : - ! --> Difference Quotient at Volume _2: ddTnew_2/dt + d/dz*K_33 d/dz*dTnew_2 = 0 --> homogene solution - ! V2*dTnew_2 *h^(n+0.5) = -dt * [ (dTnew_1-dTnew_2)/(Z_1-Z_2)*V_2 + (dTnew_2-dTnew_3)/(Z_2-Z_3)*V_3 ] + RHS - ! dTnew_2 *h^(n+0.5) = -dt * [ (dTnew_1-dTnew_2)/(Z_1-Z_2)*V_2 + (dTnew_2-dTnew_3)/(Z_2-Z_3)*V_3/V_2 ] + RHS - ! | | - ! v v - ! diffusive flux towards diffusive flux towards - ! T_2 trough boundary V2 T_2 trough boundary V3 - ! - ! --> solve coefficents for homogene part - ! dTnew_2 *h^(n+0.5) = -dt * [ a*dTnew_1 + b*dTnew_2 + c*dTnew_3 ] + ! --> h^(n+0.5)* (T^(n+0.5)-Tstar) = dt*( K_33*d/dz*(T^(n+0.5)-Tstar) + K_33*d/dz*Tstar ) + ! --> Tnew = T^(n+0.5)-Tstar + ! --> h^(n+0.5)* (Tnew) = dt*(K_33*d/dz*Tnew) + K_33*dt*d/dz*Tstar + ! --> h^(n+0.5)* (Tnew) = dt*(K_33*d/dz*Tnew) + RHS + ! --> solve for T_new + ! --> V_1 (Skalar Volume), A_1 (Area of edge), . + ! 
no Cavity A1==V1, yes Cavity A1 !=V1 /I\ nvec_up (+1) + ! I + ! ----------- zbar_1, A_1 *----I----* + ! Z_1 o T_1, V1 |\ I ./| + ! ----------- zbar_2, A_2 | \ ./ | Gaus Theorem: + ! Z_2 o T_2, V2 | \ / | --> Flux form + ! ----------- zbar_3, A_3 | | | --> normal vec outwards facing + ! Z_3 o T_3, V3 *---|-----* + ! ----------- zbar_4 \ | I ./ + ! : \ | I/ + ! \|/I + ! * I + ! \I/ + ! * nvec_dwn (-1) + ! --> 1st. solve homogenouse part: + ! f(Tnew) = h^(n+0.5)* (Tnew) - dt*(K_33*dTnew/dz) = 0 + ! + ! --> 2nd. Difference Quotient at Tnew_i in flux form (Gaus Theorem, dont forget normal vectors!!!): + ! V_i*Tnew_i *h_i = -dt * [ K_33 * (Tnew_i-1 - Tnew_i)/(Z_i-1 - Z_i) * A_i * nvec_up + ! +K_33 * (Tnew_i - Tnew_i+1)/(Z_i - Z_i+1) * A_i+1 * nvec_dwn ] + ! Tnew_i *h_i = -dt * [ K_33 * (Tnew_i-1 - Tnew_i)/(Z_i-1 - Z_i) * A_i /V_i * nvec_up + ! +K_33 * (Tnew_i - Tnew_i+1)/(Z_i - Z_i+1) * A_i+1/V_i * nvec_dwn ] ! - ! --> a = -dt*K_33/(Z_1-Z_2) + ! --> 3rd. solve for coefficents a, b, c: + ! f(Tnew) = [ a*dTnew_i-1 + b*dTnew_i + c*dTnew_i+1 ] + ! + ! df(Tnew)/dTnew_i-1 = a = -dt*K_33/(Z_i-1 - Z_i) * A_i/V_i * (nvec_up =1) ! - ! --> c = -dt*K_33/(Z_2-Z_3)*V_3/V_2 + ! df(Tnew)/dTnew_i+1 = c = dt * K_33 * 1/(Z_i - Z_i+1) * A_i+1/V_i * (nvec_dwn=-1) + ! = -dt * K_33 * 1/(Z_i - Z_i+1) * A_i+1/V_i + ! + ! df(Tnew)/dTnew_i = b = h_i + dt*K_33/(Z_i-1 - Z_i) * A_i/V_i * (nvec_up=+1) + ! - dt*K_33/(Z_i - Z_i+1) * A_i+1/V_i * (nvec_dwn=-1) + ! = h_i + dt*K_33/(Z_i-1 - Z_i) * A_i/V_i + ! + dt*K_33/(Z_i - Z_i+1) * A_i+1/V_i + ! = h_i -(a+c) + ! + ! --> 4th. solve inhomogenous part: + ! [ a*dTnew_i-1 + b*dTnew_i + c*dTnew_i+1 ] = RHS/V_i + ! + ! RHS = K_33*dt*d/dz*Tstar ! - ! --> b = h^(n+0.5) -[ dt*K_33/(Z_1-Z_2) + dt*K_33/(Z_2-Z_3)*V_3/V_2 ] = -(a+c) + h^(n+0.5) + ! --> write as Difference Quotient in flux form + ! RHS/V_i = K_33 * dt * (Tstar_i-1 - Tstar_i)/(Z_i-1 - Z_i) * A_i/V_i * (nvec_up=1) + ! + K_33 * dt * (Tstar_i - Tstar_i+1)/(Z_i - Z_i+1) * A_i+1/V_i * (nvec_dwn=-1) + ! + ! 
= K_33*dt/(Z_i-1 - Z_i) * A_i/V_i * Tstar_i-1 + ! - K_33*dt/(Z_i-1 - Z_i) * A_i/V_i * Tstar_i + ! - K_33*dt/(Z_i - Z_i+1) * A_i+1/V_i * Tstar_i + ! + K_33*dt/(Z_i - Z_i+1) * A_i+1/V_i * Tstar_i+1 + ! + ! = -a*Tstar_i-1 + (a+c)*Tstar_i - c * Tstar_i+1 + ! |-> b = h_i - (a+c), a+c = h_i-b + ! + ! = -a*Tstar_i-1 - (b-h_i)*Tstar_i - c * Tstar_i+1 + ! + ! --> 5th. solve for Tnew_i --> forward sweep algorithm --> see lower + ! | b_1 c_1 ... | |dTnew_1| + ! | a_2 b_2 c_2 ... | |dTnew_2| + ! | a_3 b_3 c_3 ... | * |dTnew_3| = RHS/V_i + ! | a_4 b_4 c_4 ...| |dTnew_4| + ! | : | | : | + ! + ! --> a = -dt*K_33 / (Z_i-1 - Z_i) * A_i/V_i + ! + ! --> c = -dt*K_33 / (Z_i - Z_i+1) * A_i+1/V_i + ! + ! --> b = h^(n+0.5) -[ dt*K_33/(Z_i-1 - Z_i)*A_i/V_i + dt*K_33/(Z_i - Z_i+1) * A_i+1/V_i ] = -(a+c) + h^(n+0.5) !___________________________________________________________________________ ! loop over local nodes - do n=1,myDim_nod2D - + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, a, b, c, tr, cp, tp, m, zinv, dz, & +!$OMP rsss, Ty, Ty1, c1, zinv1, zinv2, v_adv, zbar_n, z_n) +!$OMP DO + do n=1,myDim_nod2D ! initialise a = 0.0_WP b = 0.0_WP @@ -464,11 +559,9 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) tr = 0.0_WP tp = 0.0_WP cp = 0.0_WP - ! max. number of levels at node n nzmax=nlevels_nod2D(n) nzmin=ulevels_nod2D(n) - !___________________________________________________________________________ ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because ! they be calculate from the actualized mesh with hnode_new @@ -477,20 +570,15 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! Be carefull here vertical operation have to be done on NEW vertical mesh !!! zbar_n=0.0_WP Z_n=0.0_WP -! 
zbar_n(nzmax)=zbar(nzmax) zbar_n(nzmax)=zbar_n_bot(n) Z_n(nzmax-1)=zbar_n(nzmax) + hnode_new(nzmax-1,n)/2.0_WP - !!PS do nz=nzmax-1,2,-1 do nz=nzmax-1,nzmin+1,-1 zbar_n(nz) = zbar_n(nz+1) + hnode_new(nz,n) Z_n(nz-1) = zbar_n(nz) + hnode_new(nz-1,n)/2.0_WP end do - !!PS zbar_n(1) = zbar_n(2) + hnode_new(1,n) zbar_n(nzmin) = zbar_n(nzmin+1) + hnode_new(nzmin,n) - !_______________________________________________________________________ ! Regular part of coefficients: --> surface layer - !!PS nz=1 nz=nzmin ! 1/dz(nz) @@ -504,23 +592,28 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! layer dependent coefficients for for solving dT(1)/dt+d/dz*K_33*d/dz*T(1) = ... a(nz)=0.0_WP - !!PS c(nz)=-(Kv(2,n)+Ty1)*zinv2*zinv*area(nz+1,n)/area(nz,n) - c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/area(nz,n) + !!PS c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * (area(nz+1,n)/areasvol(nz,n)) + c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * area(nz+1,n)/areasvol(nz,n) b(nz)=-c(nz)+hnode_new(nz,n) ! ale - ! update from the vertical advection --> comes from splitting of vert ! velocity into explicite and implicite contribution if (do_wimpl) then - v_adv=zinv*area(nz+1,n)/area(nz,n) - b(nz)=b(nz)+Wvel_i(nz, n)*zinv-min(0._WP, Wvel_i(nz+1, n))*v_adv - c(nz)=c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv + !___________________________________________________________________ + ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for + ! numerical reasons, to gurante that area/areasvol in case of no + ! cavity is ==1.0_WP + v_adv =zinv * ( area(nz ,n)/areasvol(nz,n) ) + b(nz) =b(nz)+Wvel_i(nz, n)*v_adv + + !!PS v_adv =zinv * ( area(nz+1,n)/areasvol(nz,n) ) + v_adv =zinv * area(nz+1,n)/areasvol(nz,n) + b(nz) =b(nz)-min(0._WP, Wvel_i(nz+1, n))*v_adv + c(nz) =c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv end if ! backup zinv2 for next depth level zinv1=zinv2 - !_______________________________________________________________________ ! 
Regular part of coefficients: --> 2nd...nl-2 layer - !!PS do nz=2, nzmax-2 do nz=nzmin+1, nzmax-2 ! 1/dz(nz) @@ -532,24 +625,35 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) (zbar_n(nz+1)-Z_n(nz+1 ))*zinv2 *slope_tapered(3,nz+1,n)**2*Ki(nz+1,n) Ty =Ty *isredi Ty1=Ty1*isredi + ! layer dependent coefficients for for solving dT(nz)/dt+d/dz*K_33*d/dz*T(nz) = ... - a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv - c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv*area(nz+1,n)/area(nz,n) + !___________________________________________________________________ + ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for + ! numerical reasons, to gurante that area/areasvol in case of no + ! cavity is ==1.0_WP + a(nz)=-(Kv(nz,n) +Ty )*zinv1*zinv * ( area(nz ,n)/areasvol(nz,n) ) + !!PS c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * ( area(nz+1,n)/areasvol(nz,n) ) + c(nz)=-(Kv(nz+1,n)+Ty1)*zinv2*zinv * area(nz+1,n)/areasvol(nz,n) b(nz)=-a(nz)-c(nz)+hnode_new(nz,n) ! backup zinv2 for next depth level zinv1=zinv2 - ! update from the vertical advection if (do_wimpl) then - v_adv=zinv + !_______________________________________________________________ + ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for + ! numerical reasons, to gurante that area/areasvol in case of no + ! cavity is ==1.0_WP + v_adv=zinv * ( area(nz ,n)/areasvol(nz,n) ) a(nz)=a(nz)+min(0._WP, Wvel_i(nz, n))*v_adv b(nz)=b(nz)+max(0._WP, Wvel_i(nz, n))*v_adv - v_adv=v_adv*area(nz+1,n)/area(nz,n) + !!PS v_adv=v_adv*areasvol(nz+1,n)/areasvol(nz,n) + !!PS v_adv=zinv * ( area(nz+1,n)/areasvol(nz,n) ) + v_adv=zinv * area(nz+1,n)/areasvol(nz,n) b(nz)=b(nz)-min(0._WP, Wvel_i(nz+1, n))*v_adv c(nz)=c(nz)-max(0._WP, Wvel_i(nz+1, n))*v_adv end if - end do ! --> do nz=2, nzmax-2 + end do ! --> do nz=nzmin+1, nzmax-2 !_______________________________________________________________________ ! Regular part of coefficients: --> nl-1 layer @@ -558,88 +662,169 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) zinv=1.0_WP*dt ! no ... 
/(zbar(nzmax-1)-zbar(nzmax)) because of ale ! calculate isoneutral diffusivity : Kd*s^2 --> K_33 = Kv + Kd*s^2 - Ty= (Z_n(nz-1)-zbar_n(nz)) *zinv1 *slope_tapered(3,nz-1,n)**2*Ki(nz-1,n) + & - (zbar_n(nz)-Z_n(nz)) *zinv1 *slope_tapered(3,nz,n)**2 *Ki(nz,n) + Ty= (Z_n(nz-1) -zbar_n(nz)) * zinv1 * slope_tapered(3,nz-1,n)**2 * Ki(nz-1,n) + & + (zbar_n(nz)-Z_n(nz) ) * zinv1 * slope_tapered(3,nz ,n)**2 * Ki(nz,n) Ty =Ty *isredi ! layer dependent coefficients for for solving dT(nz)/dt+d/dz*K_33*d/dz*T(nz) = ... - a(nz)=-(Kv(nz,n)+Ty)*zinv1*zinv + + !___________________________________________________________________ + ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for + ! numerical reasons, to gurante that area/areasvol in case of no + ! cavity is ==1.0_WP + a(nz)=-(Kv(nz,n)+Ty)*zinv1*zinv* ( area(nz ,n)/areasvol(nz,n) ) c(nz)=0.0_WP b(nz)=-a(nz)+hnode_new(nz,n) ! update from the vertical advection if (do_wimpl) then - v_adv=zinv + !___________________________________________________________________ + ! use brackets when computing ( area(nz ,n)/areasvol(nz,n) ) for + ! numerical reasons, to gurante that area/areasvol in case of no + ! cavity is ==1.0_WP + v_adv=zinv* ( area(nz ,n)/areasvol(nz,n) ) a(nz)=a(nz)+min(0._WP, Wvel_i(nz, n))*v_adv b(nz)=b(nz)+max(0._WP, Wvel_i(nz, n))*v_adv end if !_______________________________________________________________________ - ! the rhs (inhomogene part): --> rhs = K_33*dt*d/dz*Tstar --> Tstar...tr_arr + ! the rhs (inhomogene part): --> rhs = K_33*dt*d/dz*Tstar --> Tstar...trarr ! solve difference quotient for rhs --> tr ! RHS at Volume_2: ! ! RHS*V_2 = K_33*dt*(T_1-T_2)/(Z_1-Z_2)*V_2 - K_33*dt*(T_2-T_3)/(Z_2-Z_3)*V_3 ! = -a*T_1 + (a+c)*T_2 - c*T_3 ! - ! -+--> tr(1) =(a(1)+c(1))*tr_arr(1,n,tr_num)-c(1)*tr_arr(2,n,tr_num) + ! -+--> tr(1) =(a(1)+c(1))*trarr(1,n)-c(1)*trarr(2,n) ! |--> a(1)=0 - !!PS nz=1 nz=nzmin dz=hnode_new(nz,n) ! 
It would be (zbar(nz)-zbar(nz+1)) if not ALE - tr(nz)=-(b(nz)-dz)*tr_arr(nz,n,tr_num)-c(nz)*tr_arr(nz+1,n,tr_num) - !tr(nz)=c(nz)*(tr_arr(nz,n,tr_num) - tr_arr(nz+1,n,tr_num)) - + tr(nz)=-(b(nz)-dz)*trarr(nz,n)-c(nz)*trarr(nz+1,n) - ! ******************************************************************* - ! nonlocal transport to the rhs (only T and S currently) _GO_ - ! ******************************************************************* - ! rsss will be used later to compute: - ! 1. the virtual salinity flux - ! 2. the contribution from the nonlocal term in KPP for salinity - if (tr_num==2) then - rsss=ref_sss - if (ref_sss_local) rsss=tr_arr(1,n,2) - end if - - !!PS do nz=2,nzmax-2 do nz=nzmin+1,nzmax-2 dz=hnode_new(nz,n) - tr(nz)=-a(nz)*tr_arr(nz-1,n,tr_num)-(b(nz)-dz)*tr_arr(nz,n,tr_num)-c(nz)*tr_arr(nz+1,n,tr_num) - !tr(nz)=-a(nz)*tr_arr(nz-1,n,tr_num) & - ! -c(nz)*tr_arr(nz+1,n,tr_num) & - ! +(a(nz)+c(nz))*tr_arr(nz,n,tr_num) - - ! ******************************************************************* - ! nonlocal transport to the rhs (only T and S currently) _GO_ - ! ******************************************************************* -!leads to non conservation in 8th digit. needs to be checked! -! if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then -! if (tr_num==1) then ! T -! tr(nz)=tr(nz)+(MIN(ghats(nz,n)*Kv(nz,n), 1.0_WP)-MIN(ghats(nz+1,n)*Kv(nz+1,n), 1.0_WP)*area(nz+1,n)/area(nz,n))*heat_flux(n)/vcpw -! elseif (tr_num==2) then ! S -! tr(nz)=tr(nz)-(MIN(ghats(nz,n)*Kv(nz,n), 1.0_WP)-MIN(ghats(nz+1,n)*Kv(nz+1,n), 1.0_WP)*area(nz+1,n)/area(nz,n))*rsss*water_flux(n) -! end if -! 
end if + tr(nz)= -a(nz) * trarr(nz-1,n) & + -(b(nz)-dz)* trarr(nz,n) & + -c(nz) * trarr(nz+1,n) end do + nz=nzmax-1 dz=hnode_new(nz,n) - tr(nz)=-a(nz)*tr_arr(nz-1,n,tr_num)-(b(nz)-dz)*tr_arr(nz,n,tr_num) - !tr(nz)=-a(nz)*tr_arr(nz-1,n,tr_num)+a(nz)*tr_arr(nz,n,tr_num) + tr(nz)=-a(nz)*trarr(nz-1,n)-(b(nz)-dz)*trarr(nz,n) + !_______________________________________________________________________ + ! Add KPP nonlocal fluxes to the rhs (only T and S currently) + ! use here blmc or kpp_oblmixc instead of Kv, since Kv already contains + ! at this point the mixing enhancments from momix, instable + ! mixing or windmixing which are to much for nonlocal + ! transports and lead to instability of the model + if (use_kpp_nonlclflx) then + if (tracers%data(tr_num)%ID==2) then + rsss=ref_sss + if (ref_sss_local) rsss=tracers%data(tr_num)%values(1,n) + end if + + !___________________________________________________________________ + ! use fesom1.4 KPP + if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then + if (tracers%data(tr_num)%ID==1) then ! temperature + ! --> no fluxes to the top out of the surface, no fluxes + ! downwards out of the bottom + !___surface_________________________________________________ + nz = nzmin + tr(nz)=tr(nz) & + +(-MIN(ghats(nz+1,n)*blmc(nz+1,n,2), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + !___bulk____________________________________________________ + do nz=nzmin+1, nzmax-2 + tr(nz)=tr(nz) & + +( MIN(ghats(nz ,n)*blmc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + -MIN(ghats(nz+1,n)*blmc(nz+1,n,2), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + end do + !___bottom__________________________________________________ + nz = nzmax-1 + tr(nz)=tr(nz) & + +( MIN(ghats(nz ,n)*blmc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + + elseif (tracers%data(tr_num)%ID==2) then ! salinity + ! --> no fluxes to the top out of the surface, no fluxes + ! 
downwards out of the bottom + !___surface_________________________________________________ + nz = nzmin + tr(nz)=tr(nz) & + -(-MIN(ghats(nz+1,n)*blmc(nz+1,n,3), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + !___bulk____________________________________________________ + do nz=nzmin+1, nzmax-2 + tr(nz)=tr(nz) & + -( MIN(ghats(nz ,n)*blmc(nz ,n,3), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + -MIN(ghats(nz+1,n)*blmc(nz+1,n,3), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + end do + !___bottom__________________________________________________ + nz = nzmax-1 + tr(nz)=tr(nz) & + -( MIN(ghats(nz ,n)*blmc(nz ,n,3), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + end if + !___________________________________________________________________ + ! use cvmix KPP + elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then + if (tracers%data(tr_num)%ID==1) then ! temperature + !___surface_________________________________________________ + nz = nzmin + tr(nz)=tr(nz) & + +(-MIN(kpp_nonlcltranspT(nz+1,n)*kpp_oblmixc(nz+1,n,2), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + !___bulk____________________________________________________ + do nz=nzmin+1, nzmax-2 + tr(nz)=tr(nz) & + +( MIN(kpp_nonlcltranspT(nz ,n)*kpp_oblmixc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + -MIN(kpp_nonlcltranspT(nz+1,n)*kpp_oblmixc(nz+1,n,2), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + end do + !___bottom__________________________________________________ + nz = nzmax-1 + tr(nz)=tr(nz) & + +( MIN(kpp_nonlcltranspT(nz ,n)*kpp_oblmixc(nz ,n,2), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + ) * heat_flux(n) / vcpw * dt + + elseif (tracers%data(tr_num)%ID==2) then ! 
salinity + !___surface_________________________________________________ + nz = nzmin + tr(nz)=tr(nz) & + -(-MIN(kpp_nonlcltranspS(nz+1,n)*kpp_oblmixc(nz+1,n,3), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + !___bulk____________________________________________________ + do nz=nzmin+1, nzmax-2 + tr(nz)=tr(nz) & + -( MIN(kpp_nonlcltranspS(nz ,n)*kpp_oblmixc(nz ,n,3), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + -MIN(kpp_nonlcltranspS(nz+1,n)*kpp_oblmixc(nz+1,n,3), 1.0_WP)*(area(nz+1,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + end do + !___bottom__________________________________________________ + nz = nzmax-1 + tr(nz)=tr(nz) & + -( MIN(kpp_nonlcltranspS(nz ,n)*kpp_oblmixc(nz ,n,3), 1.0_WP)*(area(nz ,n)/areasvol(nz,n)) & + ) * rsss * water_flux(n) * dt + end if + end if + end if ! --> if (use_kpp_nonlclflx) then !_______________________________________________________________________ ! case of activated shortwave penetration into the ocean, ad 3d contribution - if (use_sw_pene .and. tr_num==1) then - !!PS do nz=1, nzmax-1 + if (use_sw_pene .and. tracers%data(tr_num)%ID==1) then do nz=nzmin, nzmax-1 zinv=1.0_WP*dt !/(zbar(nz)-zbar(nz+1)) ale! - tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n)*area(nz+1,n)/area(nz,n))*zinv + !!PS tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n) * ( area(nz+1,n)/areasvol(nz,n)) ) * zinv + tr(nz)=tr(nz)+(sw_3d(nz, n)-sw_3d(nz+1, n) * area(nz+1,n)/areasvol(nz,n)) * zinv end do end if !_______________________________________________________________________ ! The first row contains also the boundary condition from heatflux, ! freshwaterflux and relaxation terms - ! --> tr_arr(1,n,1)*water_flux(n) : latent heatflux contribution due to + ! --> trarr(1,n)*water_flux(n) : latent heatflux contribution due to ! cell volume. If Volume decreases --> temp has to raise, if volume ! expended --> temp has to decrease ! (-) ^ (-) ^ @@ -648,8 +833,7 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! (BUT CHECK!) | | | | ! 
v (+) v (+) ! - !!PS tr(1)= tr(1)+bc_surface(n, tracer_id(tr_num)) - tr(nzmin)= tr(nzmin)+bc_surface(n, tracer_id(tr_num),mesh) + tr(nzmin)= tr(nzmin)+bc_surface(n, tracers%data(tr_num)%ID, trarr(mesh%ulevels_nod2D(n),n), partit) !_______________________________________________________________________ ! The forward sweep algorithm to solve the three-diagonal matrix @@ -672,13 +856,10 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) ! --> dTnew_i = rhs'_i-c'_i*dTnew_i+1 ; i = n-1,n-2,...,1 ! ! initialize c-prime and s,t-prime - !!PS cp(1) = c(1)/b(1) - !!PS tp(1) = tr(1)/b(1) cp(nzmin) = c(nzmin)/b(nzmin) tp(nzmin) = tr(nzmin)/b(nzmin) ! solve for vectors c-prime and t, s-prime - !!PS do nz = 2,nzmax-1 do nz = nzmin+1,nzmax-1 m = b(nz)-cp(nz-1)*a(nz) cp(nz) = c(nz)/m @@ -689,7 +870,6 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) tr(nzmax-1) = tp(nzmax-1) ! solve for x from the vectors c-prime and d-prime - !!PS do nz = nzmax-2, 1, -1 do nz = nzmax-2, nzmin, -1 tr(nz) = tp(nz)-cp(nz)*tr(nz+1) end do @@ -697,35 +877,49 @@ subroutine diff_ver_part_impl_ale(tr_num, mesh) !_______________________________________________________________________ ! update tracer ! tr ... dTnew = T^(n+0.5) - T* - !!PS do nz=1,nzmax-1 do nz=nzmin,nzmax-1 - ! tr_arr - before ... T* - tr_arr(nz,n,tr_num)=tr_arr(nz,n,tr_num)+tr(nz) - ! tr_arr - after ... T^(n+0.5) = dTnew + T* = T^(n+0.5) - T* + T* - + ! trarr - before ... T* + trarr(nz,n)=trarr(nz,n)+tr(nz) end do - end do ! --> do n=1,myDim_nod2D + end do ! --> do n=1,myDim_nod2D +!$OMP END DO +!$OMP END PARALLEL end subroutine diff_ver_part_impl_ale ! ! 
!=============================================================================== -subroutine diff_ver_part_redi_expl(mesh) +subroutine diff_ver_part_redi_expl(tracers, partit, mesh) use o_ARRAYS - use g_PARSUP use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER USE o_param use g_config use g_comm_auto IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - integer :: elem,k - integer :: n2,nl1,ul1,nl2,nz,n - real(kind=WP) :: Tx, Ty - real(kind=WP) :: tr_xynodes(2,mesh%nl-1,myDim_nod2D+eDim_nod2D), vd_flux(mesh%nl) - -#include "associate_mesh.h" + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: n, k, elem, nz + integer :: n2, nl1, ul1, nl2 + real(kind=WP) :: Tx, Ty, vd_flux(mesh%nl) + real(kind=WP) :: tr_xynodes(2,mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), pointer :: del_ttf(:,:) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + del_ttf => tracers%work%del_ttf +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, k, elem, nz, n2, nl1, ul1, nl2, Tx, Ty, vd_flux, zbar_n, z_n) +!$OMP DO + !___________________________________________________________________________ do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 ul1=ulevels_nod2D(n) @@ -734,74 +928,89 @@ subroutine diff_ver_part_redi_expl(mesh) Tx=0.0_WP Ty=0.0_WP do k=1, nod_in_elem2D_num(n) - elem=nod_in_elem2D(k,n) - !!PS if(nz.LE.(nlevels(elem)-1)) then - if( nz.LE.(nlevels(elem)-1) .and. 
nz.GE.(ulevels(elem))) then - Tx=Tx+tr_xy(1,nz,elem)*elem_area(elem) - Ty=Ty+tr_xy(2,nz,elem)*elem_area(elem) - endif - end do - tr_xynodes(1,nz,n)=tx/3.0_WP/area(nz,n) - tr_xynodes(2,nz,n)=ty/3.0_WP/area(nz,n) + elem=nod_in_elem2D(k,n) + !!PS if(nz.LE.(nlevels(elem)-1)) then + if( nz.LE.(nlevels(elem)-1) .and. nz.GE.(ulevels(elem))) then + Tx=Tx+tr_xy(1,nz,elem)*elem_area(elem) + Ty=Ty+tr_xy(2,nz,elem)*elem_area(elem) + endif + end do + tr_xynodes(1,nz,n)=tx/3.0_WP/areasvol(nz,n) + tr_xynodes(2,nz,n)=ty/3.0_WP/areasvol(nz,n) end do end do - ! call exchange_nod_begin(tr_xynodes) !NR the halo is not needed - +!$OMP END DO + ! no halo exchange of tr_xynodes is needed ! +!$OMP DO do n=1, myDim_nod2D nl1=nlevels_nod2D(n)-1 ul1=ulevels_nod2D(n) vd_flux=0._WP !_______________________________________________________________________ - zbar_n=0.0_WP - Z_n =0.0_WP -! zbar_n(nl1+1)=zbar(nl1+1) + zbar_n(1:mesh%nl )=0.0_WP + z_n (1:mesh%nl-1)=0.0_WP zbar_n(nl1+1)=zbar_n_bot(n) - Z_n(nl1)=zbar_n(nl1+1) + hnode_new(nl1,n)/2.0_WP - !!PS do nz=nl1, 2, -1 + z_n(nl1)=zbar_n(nl1+1) + hnode_new(nl1,n)/2.0_WP do nz=nl1, ul1+1, -1 zbar_n(nz) = zbar_n(nz+1) + hnode_new(nz,n) - Z_n(nz-1) = zbar_n(nz) + hnode_new(nz-1,n)/2.0_WP + z_n(nz-1) = zbar_n(nz) + hnode_new(nz-1,n)/2.0_WP end do - !!PS zbar_n(1) = zbar_n(2) + hnode_new(1,n) - zbar_n(ul1) = zbar_n(ul1+1) + hnode_new(ul1,n) + zbar_n(ul1) = zbar_n(ul1+1) + hnode_new(ul1,n) !_______________________________________________________________________ - !!PS do nz=2,nl1 do nz=ul1+1,nl1 - vd_flux(nz)=(Z_n(nz-1)-zbar_n(nz))*(slope_tapered(1,nz-1,n)*tr_xynodes(1,nz-1,n)+slope_tapered(2,nz-1,n)*tr_xynodes(2,nz-1,n))*Ki(nz-1,n) - vd_flux(nz)=vd_flux(nz)+& - (zbar_n(nz)-Z_n(nz)) *(slope_tapered(1,nz,n) *tr_xynodes(1,nz,n) +slope_tapered(2,nz,n) *tr_xynodes(2,nz,n)) *Ki(nz,n) - vd_flux(nz)=vd_flux(nz)/(Z_n(nz-1)-Z_n(nz))*area(nz,n) + 
vd_flux(nz)=(z_n(nz-1)-zbar_n(nz))*(slope_tapered(1,nz-1,n)*tr_xynodes(1,nz-1,n)+slope_tapered(2,nz-1,n)*tr_xynodes(2,nz-1,n))*Ki(nz-1,n) + vd_flux(nz)=vd_flux(nz)+& + (zbar_n(nz)-z_n(nz)) *(slope_tapered(1,nz,n) *tr_xynodes(1,nz,n) +slope_tapered(2,nz,n) *tr_xynodes(2,nz,n)) *Ki(nz,n) + vd_flux(nz)=vd_flux(nz)/(z_n(nz-1)-z_n(nz))*area(nz,n) enddo - !!PS do nz=1,nl1 do nz=ul1,nl1 - del_ttf(nz,n) = del_ttf(nz,n)+(vd_flux(nz) - vd_flux(nz+1))*dt/area(nz,n) + del_ttf(nz,n) = del_ttf(nz,n)+(vd_flux(nz) - vd_flux(nz+1))*dt/areasvol(nz,n) enddo end do -end subroutine diff_ver_part_redi_expl! +!$OMP END DO +!$OMP END PARALLEL +end subroutine diff_ver_part_redi_expl ! ! !=============================================================================== -subroutine diff_part_hor_redi(mesh) +subroutine diff_part_hor_redi(tracers, partit, mesh) use o_ARRAYS - use g_PARSUP use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER use o_param use g_config IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: deltaX1,deltaY1,deltaX2,deltaY2 + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ integer :: edge - integer :: n2,nl1,ul1,nl2,ul2,nl12,ul12,nz,el(2),elnodes(3),n,enodes(2) - real(kind=WP) :: c, Fx, Fy,Tx, Ty, Tx_z, Ty_z, SxTz, SyTz, Tz(2) + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 + integer :: nl1, ul1, nl2, ul2, nl12, ul12, nz, el(2), elnodes(3), enodes(2) + real(kind=WP) :: c, Fx, Fy, Tx, Ty, Tx_z, Ty_z, SxTz, SyTz, Tz(2) real(kind=WP) :: rhs1(mesh%nl-1), rhs2(mesh%nl-1), Kh, dz real(kind=WP) :: isredi=0._WP + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), pointer :: del_ttf(:,:) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + del_ttf => tracers%work%del_ttf -#include "associate_mesh.h" - + !___________________________________________________________________________ if (Redi) isredi=1._WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, deltaX1, deltaY1, deltaX2, deltaY2, & +!$OMP nl1, ul1, nl2, ul2, nl12, ul12, nz, el, elnodes, enodes, & +!$OMP c, Fx, Fy, Tx, Ty, Tx_z, Ty_z, SxTz, SyTz, Tz, & +!$OMP rhs1, rhs2, Kh, dz) +!$OMP DO do edge=1, myDim_edge2D rhs1=0.0_WP rhs2=0.0_WP @@ -813,25 +1022,21 @@ subroutine diff_part_hor_redi(mesh) nl1=nlevels(el(1))-1 ul1=ulevels(el(1)) elnodes=elem2d_nodes(:,el(1)) - !Kh=elem_area(el(1)) !_______________________________________________________________________ nl2=0 ul2=0 if (el(2)>0) then - !Kh=0.5_WP*(Kh+elem_area(el(2))) nl2=nlevels(el(2))-1 ul2=ulevels(el(2)) deltaX2=edge_cross_dxdy(3,edge) deltaY2=edge_cross_dxdy(4,edge) endif - !Kh=K_hor*Kh/scale_area !_______________________________________________________________________ nl12=min(nl1,nl2) ul12=max(ul1,ul2) - !_______________________________________________________________________ ! (A) - do nz=ul1,ul12-1 + do nz=ul1, ul12-1 Kh=sum(Ki(nz, enodes))/2.0_WP dz=helem(nz, el(1)) Tz=0.5_WP*(tr_z(nz,enodes)+tr_z(nz+1,enodes)) @@ -845,7 +1050,6 @@ subroutine diff_part_hor_redi(mesh) rhs1(nz) = rhs1(nz) + c rhs2(nz) = rhs2(nz) - c end do - !_______________________________________________________________________ ! (B) if (ul2>0) then @@ -864,11 +1068,9 @@ subroutine diff_part_hor_redi(mesh) rhs2(nz) = rhs2(nz) - c end do end if - !_______________________________________________________________________ ! 
(C) - !!PS do nz=1,nl12 - do nz=ul12,nl12 + do nz=ul12, nl12 Kh=sum(Ki(nz, enodes))/2.0_WP dz=sum(helem(nz, el))/2.0_WP Tz=0.5_WP*(tr_z(nz,enodes)+tr_z(nz+1,enodes)) @@ -882,10 +1084,9 @@ subroutine diff_part_hor_redi(mesh) rhs1(nz) = rhs1(nz) + c rhs2(nz) = rhs2(nz) - c enddo - !_______________________________________________________________________ ! (D) - do nz=nl12+1,nl1 + do nz=nl12+1, nl1 Kh=sum(Ki(nz, enodes))/2.0_WP dz=helem(nz, el(1)) Tz=0.5_WP*(tr_z(nz,enodes)+tr_z(nz+1,enodes)) @@ -899,10 +1100,9 @@ subroutine diff_part_hor_redi(mesh) rhs1(nz) = rhs1(nz) + c rhs2(nz) = rhs2(nz) - c end do - !_______________________________________________________________________ ! (E) - do nz=nl12+1,nl2 + do nz=nl12+1, nl2 Kh=sum(Ki(nz, enodes))/2.0_WP dz=helem(nz, el(2)) Tz=0.5_WP*(tr_z(nz,enodes)+tr_z(nz+1,enodes)) @@ -916,111 +1116,188 @@ subroutine diff_part_hor_redi(mesh) rhs1(nz) = rhs1(nz) + c rhs2(nz) = rhs2(nz) - c end do - !_______________________________________________________________________ nl12=max(nl1,nl2) ul12 = ul1 if (ul2>0) ul12=min(ul1,ul2) - !!PS del_ttf(1:nl12,enodes(1))=del_ttf(1:nl12,enodes(1))+rhs1(1:nl12)*dt/area(1:nl12,enodes(1)) - !!PS del_ttf(1:nl12,enodes(2))=del_ttf(1:nl12,enodes(2))+rhs2(1:nl12)*dt/area(1:nl12,enodes(2)) - del_ttf(ul12:nl12,enodes(1))=del_ttf(ul12:nl12,enodes(1))+rhs1(ul12:nl12)*dt/area(ul12:nl12,enodes(1)) - del_ttf(ul12:nl12,enodes(2))=del_ttf(ul12:nl12,enodes(2))+rhs2(ul12:nl12)*dt/area(ul12:nl12,enodes(2)) - +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(enodes(1))) +#else +!$OMP ORDERED +#endif + del_ttf(ul12:nl12,enodes(1))=del_ttf(ul12:nl12,enodes(1))+rhs1(ul12:nl12)*dt/areasvol(ul12:nl12,enodes(1)) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(1))) + call omp_set_lock (partit%plock(enodes(2))) +#endif + del_ttf(ul12:nl12,enodes(2))=del_ttf(ul12:nl12,enodes(2))+rhs2(ul12:nl12)*dt/areasvol(ul12:nl12,enodes(2)) +#if 
defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(enodes(2))) +#else +!$OMP END ORDERED +#endif end do +!$OMP END DO +!$OMP END PARALLEL end subroutine diff_part_hor_redi ! ! !=============================================================================== -SUBROUTINE diff_part_bh(ttf, mesh) - use o_ARRAYS - use g_PARSUP +SUBROUTINE diff_part_bh(tr_num, dynamics, tracers, partit, mesh) + use o_ARRAYS, only: use MOD_MESH - use O_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_DYN use o_param use g_config use g_comm_auto - IMPLICIT NONE - type(t_mesh), intent(in), target :: mesh - real(kind=WP), intent(inout), target :: ttf(mesh%nl-1, myDim_nod2D+eDim_nod2D) - real(kind=WP) :: u1, v1, len, vi, tt, ww - integer :: nz, ed, el(2), en(2), k, elem, nl1 - real(kind=WP), allocatable :: temporary_ttf(:,:) + integer, intent(in), target :: tr_num + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: n, nz, ed, el(2), en(2), k, elem, nzmin, nzmax + integer :: elnodes1(3), elnodes2(3) + real(kind=WP) :: u1, v1, len, vi, ww, tt(mesh%nl-1) + real(kind=WP), pointer :: temporary_ttf(:,:) + real(kind=WP), pointer :: UV(:,:,:) + real(kind=WP), pointer :: ttf(:,:) +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" -#include "associate_mesh.h" - ed=myDim_nod2D+eDim_nod2D - allocate(temporary_ttf(nl-1, ed)) + UV => dynamics%uv(:,:,:) + ttf => tracers%data(tr_num)%values + temporary_ttf => tracers%work%del_ttf !use already allocated working array. could be fct_LO instead etc. 
- temporary_ttf=0.0_8 - DO ed=1, myDim_edge2D+eDim_edge2D - if (myList_edge2D(ed)>edge2D_in) cycle +!$OMP PARALLEL DO + do n=1, myDim_nod2D+eDim_nod2D + temporary_ttf(:, n)=0.0_8 + end do +!$OMP END PARALLEL DO + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, ed, el, en, k, elem, nzmin, nzmax, u1, v1, len, vi, tt, ww, & +!$OMP elnodes1, elnodes2) +!$OMP DO + DO ed=1, myDim_edge2D!+eDim_edge2D + if (myList_edge2D(ed) > edge2D_in) cycle el=edge_tri(:,ed) en=edges(:,ed) len=sqrt(sum(elem_area(el))) - nl1=maxval(nlevels_nod2D_min(en))-1 - DO nz=1,nl1 - u1=UV(1, nz,el(1))-UV(1, nz,el(2)) - v1=UV(2, nz,el(1))-UV(2, nz,el(2)) + nzmax = minval(nlevels(el)) + nzmin = maxval(ulevels(el)) + elnodes1=elem2d_nodes(:,el(1)) + elnodes2=elem2d_nodes(:,el(2)) + DO nz=nzmin, nzmax-1 + u1=maxval(ttf(nz, elnodes1))-minval(ttf(nz, elnodes2)) + v1=minval(ttf(nz, elnodes1))-maxval(ttf(nz, elnodes2)) vi=u1*u1+v1*v1 - tt=ttf(nz,en(1))-ttf(nz,en(2)) - vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) - !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 
10m^2/s for 10 km (0.04 h/50) - !vi=sqrt(10.*le) - tt=tt*vi - temporary_ttf(nz,en(1))=temporary_ttf(nz,en(1))-tt - temporary_ttf(nz,en(2))=temporary_ttf(nz,en(2))+tt - END DO + tt(nz)=ttf(nz,en(1))-ttf(nz,en(2)) + vi=sqrt(max(tracers%data(tr_num)%gamma0_tra, & + max(tracers%data(tr_num)%gamma1_tra*sqrt(vi), & + tracers%data(tr_num)%gamma2_tra* vi) & + )*len) + tt(nz)=tt(nz)*vi + END DO +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(en(1))) +#else +!$OMP ORDERED +#endif + temporary_ttf(nzmin:nzmax-1,en(1))=temporary_ttf(nzmin:nzmax-1,en(1))-tt(nzmin:nzmax-1) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(en(1))) + call omp_set_lock (partit%plock(en(2))) +#endif + temporary_ttf(nzmin:nzmax-1,en(2))=temporary_ttf(nzmin:nzmax-1,en(2))+tt(nzmin:nzmax-1) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(en(2))) +#else +!$OMP END ORDERED +#endif END DO - call exchange_nod(temporary_ttf) +!$OMP END DO +!$OMP MASTER + call exchange_nod(temporary_ttf, partit) +!$OMP END MASTER +!$OMP BARRIER ! =========== ! Second round: ! =========== - DO ed=1, myDim_edge2D+eDim_edge2D +!$OMP DO + DO ed=1, myDim_edge2D!+eDim_edge2D if (myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) en=edges(:,ed) len=sqrt(sum(elem_area(el))) - nl1=maxval(nlevels_nod2D_min(en))-1 - DO nz=1,nl1 - u1=UV(1, nz,el(1))-UV(1, nz,el(2)) - v1=UV(2, nz,el(1))-UV(2, nz,el(2)) + nzmax = minval(nlevels(el)) + nzmin = maxval(ulevels(el)) + elnodes1=elem2d_nodes(:,el(1)) + elnodes2=elem2d_nodes(:,el(2)) + DO nz=nzmin, nzmax-1 + u1=maxval(ttf(nz, elnodes1))-minval(ttf(nz, elnodes2)) + v1=minval(ttf(nz, elnodes1))-maxval(ttf(nz, elnodes2)) vi=u1*u1+v1*v1 - tt=temporary_ttf(nz,en(1))-temporary_ttf(nz,en(2)) - vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) - !vi=sqrt(max(sqrt(u1*u1+v1*v1),0.04)*le) ! 
10m^2/s for 10 km (0.04 h/50) - !vi=sqrt(10.*le) - tt=-tt*vi*dt - ttf(nz,en(1))=ttf(nz,en(1))-tt/area(nz,en(1)) - ttf(nz,en(2))=ttf(nz,en(2))+tt/area(nz,en(2)) + tt(nz)=temporary_ttf(nz,en(1))-temporary_ttf(nz,en(2)) + vi=sqrt(max(tracers%data(tr_num)%gamma0_tra, & + max(tracers%data(tr_num)%gamma1_tra*sqrt(vi), & + tracers%data(tr_num)%gamma2_tra* vi) & + )*len) + tt(nz)=-tt(nz)*vi*dt END DO - END DO - deallocate(temporary_ttf) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(en(1))) +#else +!$OMP ORDERED +#endif + ttf(nzmin:nzmax-1,en(1))=ttf(nzmin:nzmax-1,en(1))-tt(nzmin:nzmax-1)/area(nzmin:nzmax-1,en(1)) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(en(1))) + call omp_set_lock (partit%plock(en(2))) +#endif + ttf(nzmin:nzmax-1,en(2))=ttf(nzmin:nzmax-1,en(2))+tt(nzmin:nzmax-1)/area(nzmin:nzmax-1,en(2)) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(en(2))) +#else +!$OMP END ORDERED +#endif + END DO +!$OMP END DO +!$OMP END PARALLEL +call exchange_nod(ttf, partit) +!$OMP BARRIER end subroutine diff_part_bh ! ! !=============================================================================== ! this function returns a boundary conditions for a specified thacer ID and surface node ! ID = 0 and 1 are reserved for temperature and salinity -FUNCTION bc_surface(n, id, mesh) +FUNCTION bc_surface(n, id, sval, partit) use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP USE o_ARRAYS USE g_forcing_arrays - USE g_PARSUP, only: mype, par_ex USE g_config implicit none - type(t_mesh), intent(in) , target :: mesh - REAL(kind=WP) :: bc_surface - integer, intent(in) :: n, id - character(len=10) :: id_string + integer, intent(in) :: n, id + real(kind=WP), intent(in) :: sval + type(t_partit),intent(inout), target :: partit + REAL(kind=WP) :: bc_surface + character(len=10) :: id_string ! --> is_nonlinfs=1.0 for zelvel,zstar .... ! 
--> is_nonlinfs=0.0 for linfs SELECT CASE (id) - CASE (0) - bc_surface=-dt*(heat_flux(n)/vcpw + tr_arr(mesh%ulevels_nod2D(n),n,1)*water_flux(n)*is_nonlinfs) CASE (1) + bc_surface=-dt*(heat_flux(n)/vcpw + sval*water_flux(n)*is_nonlinfs) + CASE (2) ! --> real_salt_flux(:): salt flux due to containment/releasing of salt ! by forming/melting of sea ice bc_surface= dt*(virtual_salt(n) & !--> is zeros for zlevel/zstar @@ -1034,12 +1311,12 @@ FUNCTION bc_surface(n, id, mesh) CASE (303) bc_surface=0.0_WP CASE DEFAULT - if (mype==0) then + if (partit%mype==0) then write (id_string, "(I3)") id - if (mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified in boundary conditions' - if (mype==0) write(*,*) 'the model will stop!' + if (partit%mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified in boundary conditions' + if (partit%mype==0) write(*,*) 'the model will stop!' end if - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) stop END SELECT RETURN diff --git a/src/oce_ale_vel_rhs.F90 b/src/oce_ale_vel_rhs.F90 index a362bd1fe..1ec929fe5 100644 --- a/src/oce_ale_vel_rhs.F90 +++ b/src/oce_ale_vel_rhs.F90 @@ -1,45 +1,89 @@ + +module compute_vel_rhs_interface + interface + subroutine compute_vel_rhs(ice, dynamics, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module + module momentum_adv_scalar_interface - interface - subroutine momentum_adv_scalar(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface + interface + subroutine momentum_adv_scalar(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + 
type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module ! ! !_______________________________________________________________________________ -subroutine compute_vel_rhs(mesh) - use MOD_MESH - use o_ARRAYS - use i_ARRAYS - use i_therm_param +subroutine compute_vel_rhs(ice, dynamics, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use o_ARRAYS, only: ssh_gp, pgf_x, pgf_y use o_PARAM - use g_PARSUP use g_CONFIG use g_forcing_param, only: use_virt_salt use g_forcing_arrays, only: press_air use g_comm_auto use g_sbf, only: l_mslp use momentum_adv_scalar_interface - implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_ice) , intent(inout), target :: ice + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ integer :: elem, elnodes(3), nz, nzmax, nzmin real(kind=WP) :: ff, mm real(kind=WP) :: Fx, Fy, pre(3) logical, save :: lfirst=.true. - real(kind=WP) :: t1, t2, t3, t4 real(kind=WP) :: p_ice(3), p_air(3), p_eta(3) integer :: use_pice - -#include "associate_mesh.h" - - t1=MPI_Wtime() + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UV_rhs + real(kind=WP), dimension(:) , pointer :: eta_n + real(kind=WP), dimension(:) , pointer :: m_ice, m_snow, a_ice + real(kind=WP) , pointer :: rhoice, rhosno, inv_rhowat +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + UV_rhsAB => dynamics%uv_rhsAB(:,:,:) + eta_n => dynamics%eta_n(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + rhoice => ice%thermo%rhoice + rhosno => ice%thermo%rhosno + inv_rhowat=> ice%thermo%inv_rhowat + + !___________________________________________________________________________ use_pice=0 if (use_floatice .and. .not. trim(which_ale)=='linfs') use_pice=1 - + if ((toy_ocean) .and. (trim(which_toy)=="soufflet")) use_pice=0 + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, nz, nzmin, nzmax, elnodes, ff, mm, Fx, Fy, pre, p_ice, p_air, p_eta) do elem=1, myDim_elem2D nzmax = nlevels(elem) nzmin = ulevels(elem) @@ -59,7 +103,7 @@ subroutine compute_vel_rhs(mesh) ! p_eta=g*eta_n(elnodes)*(1-theta) !! this place needs update (1-theta)!!! p_eta = g*eta_n(elnodes) - ff = coriolis(elem)*elem_area(elem) + ff = mesh%coriolis(elem)*elem_area(elem) !mm=metric_factor(elem)*gg !___________________________________________________________________________ @@ -95,7 +139,7 @@ subroutine compute_vel_rhs(mesh) Fx = sum(gradient_sca(1:3,elem)*pre) Fy = sum(gradient_sca(4:6,elem)*pre) - !!PS do nz=1,nlevels(elem)-1 + do nz=nzmin,nzmax-1 ! add pressure gradient terms UV_rhs(1,nz,elem) = UV_rhs(1,nz,elem) + (Fx-pgf_x(nz,elem))*elem_area(elem) @@ -106,18 +150,16 @@ subroutine compute_vel_rhs(mesh) UV_rhsAB(2,nz,elem) =-UV(1,nz,elem)*ff! - mm*UV(1,nz,elem)*UV(2,nz,elem) end do end do - - t2=MPI_Wtime() +!$OMP END PARALLEL DO + !___________________________________________________________________________ ! 
advection - if (mom_adv==1) then + if (dynamics%momadv_opt==1) then if (mype==0) write(*,*) 'in moment not adapted mom_adv advection typ for ALE, check your namelist' - call par_ex(1) - elseif (mom_adv==2) then - call momentum_adv_scalar(mesh) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) + elseif (dynamics%momadv_opt==2) then + call momentum_adv_scalar(dynamics, partit, mesh) end if - t3=MPI_Wtime() - !___________________________________________________________________________ ! Update the rhs ff=(1.5_WP+epsilon) @@ -125,193 +167,266 @@ subroutine compute_vel_rhs(mesh) ff=1.0_WP lfirst=.false. end if - +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, nz, nzmin, nzmax) do elem=1, myDim_elem2D nzmax = nlevels(elem) nzmin = ulevels(elem) - !!PS do nz=1,nlevels(elem)-1 do nz=nzmin,nzmax-1 UV_rhs(1,nz,elem)=dt*(UV_rhs(1,nz,elem)+UV_rhsAB(1,nz,elem)*ff)/elem_area(elem) UV_rhs(2,nz,elem)=dt*(UV_rhs(2,nz,elem)+UV_rhsAB(2,nz,elem)*ff)/elem_area(elem) end do end do +!$OMP END PARALLEL DO + ! ======================= ! U_rhs contains all contributions to velocity from old time steps ! ======================= - t4=MPI_Wtime() - ! if (mod(mstep,logfile_outfreq)==0 .and. mype==0) then - ! write(*,*) 'Momentum: ', t4-t1 - ! write(*,*) 'pres., Cor: ', t2-t1 - ! write(*,*) 'h adv ', t3-t2 - ! write(*,*) 'vert. part ', t4-t3 - ! end if END SUBROUTINE compute_vel_rhs -! =================================================================== + ! ! Momentum advection on scalar control volumes with ALE adaption--> exchange zinv(nz) ! 
against hnode(nz,node) !_______________________________________________________________________________ -subroutine momentum_adv_scalar(mesh) -USE MOD_MESH -USE o_ARRAYS -USE o_PARAM -USE g_PARSUP -use g_comm_auto -IMPLICIT NONE - -type(t_mesh), intent(in) , target :: mesh -integer :: n, nz, el1, el2 -integer :: nl1, nl2, ul1, ul2, nod(2), el, ed, k, nle, ule -real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) -real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) -real(kind=WP) :: Unode_rhs(2,mesh%nl-1,myDim_nod2d+eDim_nod2D) - -#include "associate_mesh.h" - -!_______________________________________________________________________________ -do n=1,myDim_nod2d - nl1 = nlevels_nod2D(n)-1 - ul1 = ulevels_nod2D(n) - wu(1:nl1+1) = 0._WP - wv(1:nl1+1) = 0._WP - - do k=1,nod_in_elem2D_num(n) - el = nod_in_elem2D(k,n) - nle = nlevels(el)-1 - ule = ulevels(el) - !___________________________________________________________________________ - ! The vertical part for each element is collected - !!PS wu(1) = wu(1) + UV(1,1,el)*elem_area(el) - !!PS wv(1) = wv(1) + UV(2,1,el)*elem_area(el) - wu(ule) = wu(ule) + UV(1,ule,el)*elem_area(el) - wv(ule) = wv(ule) + UV(2,ule,el)*elem_area(el) - - !!PS wu(2:nle) = wu(2:nle) + 0.5_WP*(UV(1,2:nle,el)+UV(1,1:nle-1,el))*elem_area(el) - !!PS wv(2:nle) = wv(2:nle) + 0.5_WP*(UV(2,2:nle,el)+UV(2,1:nle-1,el))*elem_area(el) - wu(ule+1:nle) = wu(ule+1:nle) + 0.5_WP*(UV(1,ule+1:nle,el)+UV(1,ule:nle-1,el))*elem_area(el) - wv(ule+1:nle) = wv(ule+1:nle) + 0.5_WP*(UV(2,ule+1:nle,el)+UV(2,ule:nle-1,el))*elem_area(el) - enddo - - !!PS wu(1:nl1) = wu(1:nl1)*Wvel_e(1:nl1,n) - !!PS wv(1:nl1) = wv(1:nl1)*Wvel_e(1:nl1,n) - wu(ul1:nl1) = wu(ul1:nl1)*Wvel_e(ul1:nl1,n) - wv(ul1:nl1) = wv(ul1:nl1)*Wvel_e(ul1:nl1,n) - - !!PS do nz=1,nl1 - do nz=ul1,nl1 - ! Here 1/3 because 1/3 of the area is related to the node - Unode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) - Unode_rhs(2,nz,n) = - (wv(nz) - wv(nz+1) ) / (3._WP*hnode(nz,n)) - - enddo - - ! 
To get a clean checksum, set the remaining values to zero - Unode_rhs(1:2,nl1+1:nl-1,n) = 0._WP - Unode_rhs(1:2,1:ul1-1 ,n) = 0._WP -end do - - -!_______________________________________________________________________________ -DO ed=1, myDim_edge2D - nod = edges(:,ed) - el1 = edge_tri(1,ed) - el2 = edge_tri(2,ed) - nl1 = nlevels(el1)-1 - ul1 = ulevels(el1) - - !___________________________________________________________________________ - ! The horizontal part - !!PS un1(1:nl1) = UV(2,1:nl1,el1)*edge_cross_dxdy(1,ed) & - !!PS - UV(1,1:nl1,el1)*edge_cross_dxdy(2,ed) - un1(ul1:nl1) = UV(2,ul1:nl1,el1)*edge_cross_dxdy(1,ed) & - - UV(1,ul1:nl1,el1)*edge_cross_dxdy(2,ed) - !___________________________________________________________________________ - if (el2>0) then - nl2 = nlevels(el2)-1 - ul2 = ulevels(el2) - - !!PS un2(1:nl2) = - UV(2,1:nl2,el2)*edge_cross_dxdy(3,ed) & - !!PS + UV(1,1:nl2,el2)*edge_cross_dxdy(4,ed) - un2(ul2:nl2) = - UV(2,ul2:nl2,el2)*edge_cross_dxdy(3,ed) & - + UV(1,ul2:nl2,el2)*edge_cross_dxdy(4,ed) - - ! fill with zeros to combine the loops - ! Usually, no or only a very few levels have to be filled. In this case, - ! computing "zeros" is cheaper than the loop overhead. - un1(nl1+1:max(nl1,nl2)) = 0._WP - un2(nl2+1:max(nl1,nl2)) = 0._WP - un1(1:ul1-1) = 0._WP - un2(1:ul2-1) = 0._WP - - ! first edge node - ! Do not calculate on Halo nodes, as the result will not be used. - ! The "if" is cheaper than the avoided computiations. 
- if (nod(1) <= myDim_nod2d) then - !!PS do nz=1, max(nl1,nl2) - do nz=min(ul1,ul2), max(nl1,nl2) - Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) - Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) - end do - endif - - if (nod(2) <= myDim_nod2d) then - !!PS do nz=1, max(nl1,nl2) - do nz=min(ul1,ul2), max(nl1,nl2) - Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) - Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) - end do - endif +subroutine momentum_adv_scalar(dynamics, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + USE o_PARAM + use g_comm_auto + IMPLICIT NONE + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, nz, el1, el2 + integer :: nl1, nl2, ul1, ul2, nod(2), el, ed, k, nle, ule + real(kind=WP) :: un1(1:mesh%nl-1), un2(1:mesh%nl-1) + real(kind=WP) :: wu(1:mesh%nl), wv(1:mesh%nl) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhsAB, UVnode_rhs + real(kind=WP), dimension(:,:) , pointer :: Wvel_e +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV =>dynamics%uv(:,:,:) + UV_rhsAB =>dynamics%uv_rhsAB(:,:,:) + UVnode_rhs=>dynamics%work%uvnode_rhs(:,:,:) + Wvel_e =>dynamics%w_e(:,:) + !___________________________________________________________________________ + ! 1st. 
compute vertical momentum advection component: w * du/dz, w*dv/dz +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, el1, el2, nl1, nl2, ul1, ul2, nod, el, ed, k, nle, ule, un1, un2, wu, wv) +!$OMP DO + do n=1,myDim_nod2d + nl1 = nlevels_nod2D(n)-1 + ul1 = ulevels_nod2D(n) + wu(1:nl1+1) = 0._WP + wv(1:nl1+1) = 0._WP + + !_______________________________________________________________________ + ! loop over adjacent elements of vertice n + do k=1,nod_in_elem2D_num(n) + el = nod_in_elem2D(k,n) + !___________________________________________________________________ + nle = nlevels(el)-1 + ule = ulevels(el) + + !___________________________________________________________________ + ! accumulate horizontal velocities at full depth levels (top and + ! bottom faces of prism) + ! account here also for boundary condition below cavity --> + ! horizontal velocity at cavity-ocean interce ule (if ule>1) must be + ! zero ??? + if (ule==1) then + wu(ule) = wu(ule) + UV(1,ule,el)*elem_area(el) + wv(ule) = wv(ule) + UV(2,ule,el)*elem_area(el) + end if + + ! interpolate horizontal velocity from mid-depth levels to full + ! depth levels of upper and lower prism faces and average over adjacent + ! elements of vertice n + wu(ule+1:nle) = wu(ule+1:nle) + 0.5_WP*(UV(1,ule+1:nle,el)+UV(1,ule:nle-1,el))*elem_area(el) + wv(ule+1:nle) = wv(ule+1:nle) + 0.5_WP*(UV(2,ule+1:nle,el)+UV(2,ule:nle-1,el))*elem_area(el) + enddo + + !_______________________________________________________________________ + ! multiply w*du and w*dv + wu(ul1:nl1) = wu(ul1:nl1)*Wvel_e(ul1:nl1,n) + wv(ul1:nl1) = wv(ul1:nl1)*Wvel_e(ul1:nl1,n) + + !_______________________________________________________________________ + ! compute w*du/dz, w*dv/dz + do nz=ul1,nl1 + ! Here 1/3 because 1/3 of the area is related to the node --> comes from + ! 
averaging the elemental velocities + UVnode_rhs(1,nz,n) = - (wu(nz) - wu(nz+1) ) / (3._WP*hnode(nz,n)) + UVnode_rhs(2,nz,n) = - (wv(nz) - wv(nz+1) ) / (3._WP*hnode(nz,n)) + + enddo + + !_______________________________________________________________________ + ! To get a clean checksum, set the remaining values to zero + UVnode_rhs(1:2,nl1+1:nl-1,n) = 0._WP + UVnode_rhs(1:2,1:ul1-1 ,n) = 0._WP + end do +!$OMP END DO - else ! ed is a boundary edge, there is only the contribution from el1 - if (nod(1) <= myDim_nod2d) then - !!PS do nz=1, nl1 - do nz=ul1, nl1 + !___________________________________________________________________________ + ! 2nd. compute horizontal advection component: u*du/dx, u*dv/dx & v*du/dy, v*dv/dy + ! loop over triangle edges +!$OMP DO + do ed=1, myDim_edge2D + nod = edges(:,ed) + el1 = edge_tri(1,ed) + el2 = edge_tri(2,ed) + nl1 = nlevels(el1)-1 + ul1 = ulevels(el1) + + !_______________________________________________________________________ + ! compute horizontal normal velocity with respect to the edge from triangle + ! centroid towards triangel edge mid-pointe for element el1 + ! .o. + ! ./ \. + ! ./ el1 \. + ! ./ x \. + ! ./ |-------\.-----------------edge_cross_dxdy(1:2,ed) --> (dx,dy) + ! / |->n_vec \ + ! nod(1) o----------O----------o nod(2) + ! \. |->n_vec ./ + ! \. |------./------------------edge_cross_dxdy(3:4,ed) --> (dx,dy) + ! \. x ./ + ! \. el2 ./ + ! \. ./ + ! ° + un1(ul1:nl1) = UV(2,ul1:nl1,el1)*edge_cross_dxdy(1,ed) & + - UV(1,ul1:nl1,el1)*edge_cross_dxdy(2,ed) + + !_______________________________________________________________________ + ! compute horizontal normal velocity with respect to the edge from triangle + ! centroid towards triangel edge mid-pointe for element el2 when it is valid + ! --> if its a boundary triangle el2 will be not valid + if (el2>0) then ! 
--> el2 is valid element + nl2 = nlevels(el2)-1 + ul2 = ulevels(el2) + + un2(ul2:nl2) = - UV(2,ul2:nl2,el2)*edge_cross_dxdy(3,ed) & + + UV(1,ul2:nl2,el2)*edge_cross_dxdy(4,ed) - Unode_rhs(1,nz,nod(1)) = Unode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) - Unode_rhs(2,nz,nod(1)) = Unode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) - end do - endif - ! second edge node - if (nod(2) <= myDim_nod2d) then - !!PS do nz=1, nl1 - do nz=ul1, nl1 - Unode_rhs(1,nz,nod(2)) = Unode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - Unode_rhs(2,nz,nod(2)) = Unode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - end do - endif - endif + ! fill with zeros to combine the loops + ! Usually, no or only a very few levels have to be filled. In this case, + ! computing "zeros" is cheaper than the loop overhead. + un1(nl1+1:max(nl1,nl2)) = 0._WP + un2(nl2+1:max(nl1,nl2)) = 0._WP + un1(1:ul1-1) = 0._WP + un2(1:ul2-1) = 0._WP -end do +#if defined(__openmp_reproducible) +!$OMP ORDERED +#endif + + ! first edge node + ! Do not calculate on Halo nodes, as the result will not be used. + ! The "if" is cheaper than the avoided computiations. + if (nod(1) <= myDim_nod2d) then +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(nod(1))) +#endif + do nz=min(ul1,ul2), max(nl1,nl2) + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) + UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + un2(nz)*UV(1,nz,el2) + UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + un2(nz)*UV(2,nz,el2) + end do +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(nod(1))) +#endif + endif + + ! second edge node + if (nod(2) <= myDim_nod2d) then +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(nod(2))) +#endif + do nz=min(ul1,ul2), max(nl1,nl2) + ! 
add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) + UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) - un2(nz)*UV(1,nz,el2) + UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) - un2(nz)*UV(2,nz,el2) + end do +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(nod(2))) +#endif + endif + + else ! el2 is not a valid element --> ed is a boundary edge, there is only the contribution from el1 + ! first edge node + if (nod(1) <= myDim_nod2d) then +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(nod(1))) +#endif + do nz=ul1, nl1 + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) + UVnode_rhs(1,nz,nod(1)) = UVnode_rhs(1,nz,nod(1)) + un1(nz)*UV(1,nz,el1) + UVnode_rhs(2,nz,nod(1)) = UVnode_rhs(2,nz,nod(1)) + un1(nz)*UV(2,nz,el1) + end do ! --> do nz=ul1, nl1 +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(nod(1))) +#endif + endif + + ! second edge node + if (nod(2) <= myDim_nod2d) then +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(nod(2))) +#endif + do nz=ul1, nl1 + ! add w*du/dz+(u*du/dx+v*du/dy) & w*dv/dz+(u*dv/dx+v*dv/dy) + UVnode_rhs(1,nz,nod(2)) = UVnode_rhs(1,nz,nod(2)) - un1(nz)*UV(1,nz,el1) + UVnode_rhs(2,nz,nod(2)) = UVnode_rhs(2,nz,nod(2)) - un1(nz)*UV(2,nz,el1) + end do ! --> do nz=ul1, nl1 +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(nod(2))) +#endif + endif + endif ! 
--> if (el2>0) then -!_______________________________________________________________________________ -do n=1,myDim_nod2d - nl1 = nlevels_nod2D(n)-1 - ul1 = ulevels_nod2D(n) - - !!PS Unode_rhs(1,1:nl1,n) = Unode_rhs(1,1:nl1,n) *area_inv(1:nl1,n) - !!PS Unode_rhs(2,1:nl1,n) = Unode_rhs(2,1:nl1,n) *area_inv(1:nl1,n) - Unode_rhs(1,ul1:nl1,n) = Unode_rhs(1,ul1:nl1,n) *area_inv(ul1:nl1,n) - Unode_rhs(2,ul1:nl1,n) = Unode_rhs(2,ul1:nl1,n) *area_inv(ul1:nl1,n) -end do +#if defined(__openmp_reproducible) +!$OMP END ORDERED +#endif -!_______________________________________________________________________________ -call exchange_nod(Unode_rhs) + end do ! --> do ed=1, myDim_edge2D +!$OMP END DO -!_______________________________________________________________________________ -do el=1, myDim_elem2D - nl1 = nlevels(el)-1 - ul1 = ulevels(el) - !!PS UV_rhsAB(1:2,1:nl1,el) = UV_rhsAB(1:2,1:nl1,el) & - !!PS + elem_area(el)*(Unode_rhs(1:2,1:nl1,elem2D_nodes(1,el)) & - !!PS + Unode_rhs(1:2,1:nl1,elem2D_nodes(2,el)) & - !!PS + Unode_rhs(1:2,1:nl1,elem2D_nodes(3,el))) / 3.0_WP - UV_rhsAB(1:2,ul1:nl1,el) = UV_rhsAB(1:2,ul1:nl1,el) & - + elem_area(el)*(Unode_rhs(1:2,ul1:nl1,elem2D_nodes(1,el)) & - + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(2,el)) & - + Unode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP - -end do + !___________________________________________________________________________ + ! divide total nodal advection by scalar area +!$OMP DO + do n=1,myDim_nod2d + nl1 = nlevels_nod2D(n)-1 + ul1 = ulevels_nod2D(n) + UVnode_rhs(1,ul1:nl1,n) = UVnode_rhs(1,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) + UVnode_rhs(2,ul1:nl1,n) = UVnode_rhs(2,ul1:nl1,n) *areasvol_inv(ul1:nl1,n) + end do !-->do n=1,myDim_nod2d +!$OMP END DO + !___________________________________________________________________________ +!$OMP MASTER + call exchange_nod(UVnode_rhs, partit) +!$OMP END MASTER +!$OMP BARRIER + !___________________________________________________________________________ + ! 
convert total nodal advection from vertice --> elements +!$OMP DO + do el=1, myDim_elem2D + nl1 = nlevels(el)-1 + ul1 = ulevels(el) + UV_rhsAB(1:2,ul1:nl1,el) = UV_rhsAB(1:2,ul1:nl1,el) & + + elem_area(el)*(UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(1,el)) & + + UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(2,el)) & + + UVnode_rhs(1:2,ul1:nl1,elem2D_nodes(3,el))) / 3.0_WP + + end do ! --> do el=1, myDim_elem2D +!$OMP END DO +!$OMP END PARALLEL end subroutine momentum_adv_scalar diff --git a/src/oce_dyn.F90 b/src/oce_dyn.F90 index 6bd7225e9..a0f88f62f 100755 --- a/src/oce_dyn.F90 +++ b/src/oce_dyn.F90 @@ -1,118 +1,94 @@ ! A set of routines for computing the horizonlal viscosity ! the control parameters (their default values) are: -! gamma0 (0.01 [m/s]), gamma1 (0.1 [no dim.]), gamma2 (10.[s/m]), Div_c [1.], Leith_c[1.?] -! 1. gamma0 has the dimension of velocity. It should be as small as possible, but in any case smaller than 0.01 m/s. +! dynamics%visc_gamma0 (0.01 [m/s]), dynamics%visc_gamma1 (0.1 [no dim.]), dynamics%visc_gamma2 (10.[s/m]), Div_c [1.], Leith_c[1.?] +! 1. dynamics%visc_gamma0 has the dimension of velocity. It should be as small as possible, but in any case smaller than 0.01 m/s. ! All major ocean circulation models are stable with harmonic viscosity 0.01*len. -! 2. gamma1 is nondimensional. In commonly used Leith or Smagorinsky parameterizations it is C/pi^2=0.1 (C is about 1). +! 2. dynamics%visc_gamma1 is nondimensional. In commonly used Leith or Smagorinsky parameterizations it is C/pi^2=0.1 (C is about 1). ! We therefore try to follow this, allowing some adjustments (because our mesh is triangular, our resolution is different, etc.). -! We however, try to keep gamma1<0.1 -! 3. gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. It is only used in: +! We however, try to keep dynamics%visc_gamma1<0.1 +! 3. dynamics%visc_gamma2 is dimensional (1/velocity). 
If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. It is only used in: ! (5) visc_filt_bcksct, (6) visc_filt_bilapl, (7) visc_filt_bidiff -! 4. Div_c =1. should be default -! 5. Leith_c=? (need to be adjusted) -module h_viscosity_leith_interface - interface - subroutine h_viscosity_leith(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module -module visc_filt_harmon_interface - interface - subroutine visc_filt_harmon(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module -module visc_filt_hbhmix_interface - interface - subroutine visc_filt_hbhmix(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module -module visc_filt_biharm_interface - interface - subroutine visc_filt_biharm(option, mesh) - use mod_mesh - integer :: option - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module module visc_filt_bcksct_interface interface - subroutine visc_filt_bcksct(mesh) + subroutine visc_filt_bcksct(dynamics, partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module + module visc_filt_bilapl_interface interface - subroutine visc_filt_bilapl(mesh) + subroutine visc_filt_bilapl(dynamics, partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module + module visc_filt_bidiff_interface interface - subroutine visc_filt_bidiff(mesh) - use mod_mesh - type(t_mesh), intent(in) 
, target :: mesh - end subroutine - end interface -end module -module visc_filt_dbcksc_interface - interface - subroutine visc_filt_dbcksc(mesh) + subroutine visc_filt_bidiff(dynamics, partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine end interface end module -module backscatter_coef_interface - interface - subroutine backscatter_coef(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module -module uke_update_interface - interface - subroutine uke_update(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module - -! =================================================================== +! ! Contains routines needed for computations of dynamics. ! includes: update_vel, compute_vel_nodes -! =================================================================== -SUBROUTINE update_vel(mesh) +!_______________________________________________________________________________ +SUBROUTINE update_vel(dynamics, partit, mesh) USE MOD_MESH - USE o_ARRAYS + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN USE o_PARAM - USE g_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE - integer :: elem, elnodes(3), nz, m, nzmax, nzmin + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, elem, elnodes(3), nz, nzmin, nzmax real(kind=WP) :: eta(3) real(kind=WP) :: Fx, Fy - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) + + !___________________________________________________________________________ +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nz, nzmin, nzmax, eta, Fx, Fy) DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) eta=-g*theta*dt*d_eta(elnodes) @@ -120,33 +96,52 @@ SUBROUTINE update_vel(mesh) Fy=sum(gradient_sca(4:6,elem)*eta) nzmin = ulevels(elem) nzmax = nlevels(elem) - !!PS DO nz=1, nlevels(elem)-1 DO nz=nzmin, nzmax-1 UV(1,nz,elem)= UV(1,nz,elem) + UV_rhs(1,nz,elem) + Fx UV(2,nz,elem)= UV(2,nz,elem) + UV_rhs(2,nz,elem) + Fy END DO END DO - eta_n=eta_n+d_eta - call exchange_elem(UV) +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO + DO n=1, myDim_nod2D+eDim_nod2D + eta_n(n)=eta_n(n)+d_eta(n) + END DO +!$OMP END PARALLEL DO + call exchange_elem(UV, partit) +!$OMP BARRIER end subroutine update_vel -!========================================================================== -subroutine compute_vel_nodes(mesh) +! +! 
+!_______________________________________________________________________________ +subroutine compute_vel_nodes(dynamics, partit, mesh) USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN USE o_PARAM - USE o_ARRAYS - USE g_PARSUP use g_comm_auto IMPLICIT NONE + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ integer :: n, nz, k, elem, nln, uln, nle, ule real(kind=WP) :: tx, ty, tvol - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV=>dynamics%uv(:,:,:) + UVnode=>dynamics%uvnode(:,:,:) + !___________________________________________________________________________ +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, k, elem, nln, uln, nle, ule, tx, ty, tvol) DO n=1, myDim_nod2D uln = ulevels_nod2D(n) nln = nlevels_nod2D(n) - !!PS DO nz=1, nlevels_nod2D(n)-1 DO nz=uln, nln-1 tvol=0.0_WP tx =0.0_WP @@ -161,464 +156,161 @@ subroutine compute_vel_nodes(mesh) tx=tx+UV(1,nz,elem)*elem_area(elem) ty=ty+UV(2,nz,elem)*elem_area(elem) END DO - Unode(1,nz,n)=tx/tvol - Unode(2,nz,n)=ty/tvol + UVnode(1,nz,n)=tx/tvol + UVnode(2,nz,n)=ty/tvol END DO END DO - call exchange_nod(Unode) +!$OMP END PARALLEL DO + call exchange_nod(UVnode, partit) +!$OMP BARRIER end subroutine compute_vel_nodes -!=========================================================================== -subroutine viscosity_filter(option, mesh) -use o_PARAM -use g_PARSUP -use MOD_MESH -use h_viscosity_leith_interface -use visc_filt_harmon_interface -use visc_filt_hbhmix_interface -use visc_filt_biharm_interface 
-use visc_filt_bcksct_interface -use visc_filt_bilapl_interface -use visc_filt_bidiff_interface -use visc_filt_dbcksc_interface -use backscatter_coef_interface -IMPLICIT NONE -integer :: option -type(t_mesh), intent(in) , target :: mesh -! Driving routine -! Background viscosity is selected in terms of Vl, where V is -! background velocity scale and l is the resolution. V is 0.005 -! or 0.01, perhaps it would be better to pass it as a parameter. - -! h_viscosity_leiht needs vorticity, so vorticity array should be -! allocated. At present, there are two rounds of smoothing in -! h_viscosity. - -SELECT CASE (option) -CASE (1) - ! ==== - ! Harmonic Leith parameterization - ! ==== - call h_viscosity_leith(mesh) - call visc_filt_harmon(mesh) -CASE (2) - ! === - ! Laplacian+Leith+biharmonic background - ! === - call h_viscosity_leith(mesh) - call visc_filt_hbhmix(mesh) -CASE (3) - ! === - ! Biharmonic Leith parameterization - ! === - call h_viscosity_leith(mesh) - call visc_filt_biharm(2, mesh) -CASE (4) - ! === - ! Biharmonic+upwind-type - ! === - call visc_filt_biharm(1, mesh) -CASE (5) - call visc_filt_bcksct(mesh) -CASE (6) - call visc_filt_bilapl(mesh) -CASE (7) - call visc_filt_bidiff(mesh) -CASE (8) - call backscatter_coef(mesh) - call visc_filt_dbcksc(mesh) -CASE DEFAULT - if (mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' - call par_ex - stop -END SELECT -end subroutine viscosity_filter -! =================================================================== -SUBROUTINE visc_filt_harmon(mesh) -USE MOD_MESH -USE o_ARRAYS -USE o_PARAM -USE g_PARSUP -USE g_CONFIG -IMPLICIT NONE - -real(kind=WP) :: u1, v1, le(2), len, vi -integer :: nz, ed, el(2) , nzmin,nzmax -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! An analog of harmonic viscosity operator. - ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area - ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. - ! 
The contribution from boundary edges is neglected (free slip). - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - len=sqrt(sum(elem_area(el(1:2)))) - nzmax = minval(nlevels(el)) - nzmin = maxval(ulevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - vi=0.5_WP*(Visc(nz,el(1))+Visc(nz,el(2))) - vi=max(vi, gamma0*len)*dt ! limited from below by backgroung - u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi - v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi - - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO -end subroutine visc_filt_harmon -! =================================================================== -SUBROUTINE visc_filt_biharm(option, mesh) - USE MOD_MESH - USE o_ARRAYS - USE o_PARAM - USE g_PARSUP - USE g_CONFIG - use g_comm_auto - IMPLICIT NONE - ! An energy conserving version - ! Also, we use the Leith viscosity - ! - real(kind=WP) :: u1, v1, vi, len - integer :: ed, el(2), nz, option, nzmin, nzmax - real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! Filter is applied twice. 
- ed=myDim_elem2D+eDim_elem2D - allocate(U_c(nl-1,ed), V_c(nl-1, ed)) - U_c=0.0_WP - V_c=0.0_WP - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmax = minval(nlevels(el)) - nzmin = maxval(ulevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO - END DO - - if(option==1) then - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - nzmin = ulevels(ed) - nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 - Do nz=nzmin,nzmax-1 - ! vi has the sense of harmonic viscosity coefficient because of - ! the division by area in the end - ! ==== - ! Case 1 -- an analog to the third-order upwind (vi=gamma1 * |u| * l) - ! ==== - vi=max(gamma0, gamma1*sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len*dt - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - end if - - if(option==2) then - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - nzmin = ulevels(ed) - nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 - Do nz=nzmin,nzmax-1 - ! vi has the sense of harmonic viscosity coefficient because of - ! the division by area in the end - ! === - ! Case 2 -- Leith +background (do not forget to call h_viscosity_leith before using this option) - ! === - vi=max(Visc(nz,ed), gamma0*len)*dt ! limited from below by backgroung - ! - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - end if - - call exchange_elem(U_c) - call exchange_elem(V_c) - DO ed=1, myDim_edge2D+eDim_edge2D - ! 
check if its a boudnary edge - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmin = maxval(ulevels(el)) - nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO - - deallocate(V_c,U_c) - -end subroutine visc_filt_biharm -! =================================================================== -SUBROUTINE visc_filt_hbhmix(mesh) - USE MOD_MESH - USE o_ARRAYS - USE o_PARAM - USE g_PARSUP - USE g_CONFIG - use g_comm_auto - IMPLICIT NONE - - ! An energy and momentum conserving version. - ! We use the harmonic Leith viscosity + biharmonic background viscosity - ! - - real(kind=WP) :: u1, v1, vi, len, crosslen, le(2) - integer :: ed, el(2), nz, nzmin, nzmax - real(kind=WP), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! Filter is applied twice. - ed=myDim_elem2D+eDim_elem2D - allocate(U_c(nl-1,ed), V_c(nl-1, ed)) - U_c=0.0_WP - V_c=0.0_WP - DO ed=1, myDim_edge2D+eDim_edge2D - ! check if its a boudnary edge - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmin = maxval(ulevels(el)) - nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - vi=dt*0.5_WP*(Visc(nz,el(1))+Visc(nz,el(2))) - ! 
backgroung is added later (biharmonically) - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - u1=u1*vi - v1=v1*vi - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO - - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - nzmin = ulevels(ed) - nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 - Do nz=nzmin,nzmax-1 - vi=dt*gamma0*len ! add biharmonic backgroung - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - call exchange_elem(U_c) - call exchange_elem(V_c) - DO ed=1, myDim_edge2D+eDim_edge2D - ! check if its a boudnary edge - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - nzmin = maxval(ulevels(el)) - nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 - DO nz=nzmin,nzmax-1 - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO - - deallocate(V_c,U_c) - -end subroutine visc_filt_hbhmix - -! =================================================================== -SUBROUTINE h_viscosity_leith(mesh) - ! - ! 
Coefficient of horizontal viscosity is a combination of the Leith (with Leith_c) and modified Leith (with Div_c) - USE MOD_MESH - USE o_ARRAYS - USE o_PARAM - USE g_PARSUP - USE g_CONFIG - use g_comm_auto - IMPLICIT NONE - real(kind=WP) :: dz, div_elem(3), xe, ye, vi - integer :: elem, nl1, nz, elnodes(3), n, k, nt, ul1 - real(kind=WP) :: leithx, leithy - real(kind=WP), allocatable :: aux(:,:) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! - if(mom_adv<4) call relative_vorticity(mesh) !!! vorticity array should be allocated - ! Fill in viscosity: - Visc = 0.0_WP - DO elem=1, myDim_elem2D !! m=1, myDim_elem2D - !! elem=myList_elem2D(m) - !_______________________________________________________________________ - ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because - ! they run over elements here - nl1 =nlevels(elem)-1 - ul1 =ulevels(elem) - - zbar_n=0.0_WP - ! in case of partial cells zbar_n(nzmax) is not any more at zbar(nzmax), - ! zbar_n(nzmax) is now zbar_e_bot(elem), - zbar_n(nl1+1)=zbar_e_bot(elem) - !!PS do nz=nl1,2,-1 - do nz=nl1,ul1+1,-1 - zbar_n(nz) = zbar_n(nz+1) + helem(nz,elem) - end do - !!PS zbar_n(1) = zbar_n(2) + helem(1,elem) - zbar_n(ul1) = zbar_n(ul1+1) + helem(ul1,elem) - - !_______________________________________________________________________ - elnodes=elem2D_nodes(:,elem) - !!PS do nz=1,nl1 - do nz=ul1,nl1 - dz=zbar_n(nz)-zbar_n(nz+1) - div_elem=(Wvel(nz,elnodes)-Wvel(nz+1,elnodes))/dz - xe=sum(gradient_sca(1:3,elem)*div_elem) - ye=sum(gradient_sca(4:6,elem)*div_elem) - div_elem=vorticity(nz,elnodes) - leithx=sum(gradient_sca(1:3,elem)*div_elem) - leithy=sum(gradient_sca(4:6,elem)*div_elem) - Visc(nz,elem)=min(gamma1*elem_area(elem)*sqrt((Div_c*(xe**2+ye**2) & - + Leith_c*(leithx**2+leithy**2))*elem_area(elem)), elem_area(elem)/dt) - end do !! 0.1 here comes from (2S)^{3/2}/pi^3 - do nz=nl1+1, nl-1 - Visc(nz, elem)=0.0_WP - end do - do nz=1,ul1-1 - Visc(nz, elem)=0.0_WP - end do - END DO +! +! 
+!_______________________________________________________________________________ +subroutine viscosity_filter(option, dynamics, partit, mesh) + use o_PARAM + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + use visc_filt_bcksct_interface + use visc_filt_bilapl_interface + use visc_filt_bidiff_interface + use g_backscatter + IMPLICIT NONE + integer :: option + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh - allocate(aux(nl-1,myDim_nod2D+eDim_nod2D)) - aux = 0.0_WP - DO nt=1,2 - DO n=1, myDim_nod2D - nl1 = nlevels_nod2D(n) - ul1 = ulevels_nod2D(n) - !!PS DO nz=1, nlevels_nod2D(n)-1 - DO nz=ul1, nl1-1 - dz=0.0_WP - vi=0.0_WP - DO k=1, nod_in_elem2D_num(n) - elem=nod_in_elem2D(k,n) - dz=dz+elem_area(elem) - vi=vi+Visc(nz,elem)*elem_area(elem) - END DO - aux(nz,n)=vi/dz - END DO - END DO - call exchange_nod(aux) - do elem=1, myDim_elem2D - elnodes=elem2D_nodes(:,elem) - nl1=nlevels(elem)-1 - ul1=ulevels(elem) - !!!PS Do nz=1, nl1 - Do nz=ul1, nl1 - Visc(nz,elem)=sum(aux(nz,elnodes))/3.0_WP - END DO - DO nz=nl1+1, nl-1 - Visc(nz,elem)=0.0_WP - END Do - DO nz=1, ul1-1 - Visc(nz,elem)=0.0_WP - END Do - end do - end do - call exchange_elem(Visc) - deallocate(aux) -END subroutine h_viscosity_leith -! ======================================================================= -SUBROUTINE visc_filt_bcksct(mesh) + ! Driving routine + ! Background viscosity is selected in terms of Vl, where V is + ! background velocity scale and l is the resolution. V is 0.005 + ! or 0.01, perhaps it would be better to pass it as a parameter. + + ! h_viscosity_leiht needs vorticity, so vorticity array should be + ! allocated. At present, there are two rounds of smoothing in + ! h_viscosity. + SELECT CASE (option) + CASE (5) + if (flag_debug .and. 
partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bcksct'//achar(27)//'[0m' + call visc_filt_bcksct(dynamics, partit, mesh) + CASE (6) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bilapl'//achar(27)//'[0m' + call visc_filt_bilapl(dynamics, partit, mesh) + CASE (7) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_bidiff'//achar(27)//'[0m' + call visc_filt_bidiff(dynamics, partit, mesh) + CASE (8) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call backscatter_coef'//achar(27)//'[0m' + call backscatter_coef(partit, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[37m'//' --> call visc_filt_dbcksc'//achar(27)//'[0m' + call visc_filt_dbcksc(dynamics, partit, mesh) + CASE DEFAULT + if (partit%mype==0) write(*,*) 'mixing scheme with option ' , option, 'has not yet been implemented' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + END SELECT +end subroutine viscosity_filter +! +! +!_______________________________________________________________________________ +SUBROUTINE visc_filt_bcksct(dynamics, partit, mesh) USE MOD_MESH - USE o_ARRAYS + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN USE o_PARAM - USE g_PARSUP USE g_CONFIG USE g_comm_auto IMPLICIT NONE - - real(kind=8) :: u1, v1, len, vi + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + real(kind=8) :: u1, v1, len, vi integer :: nz, ed, el(2), nelem(3),k, elem, nzmin, nzmax - real(kind=8), allocatable :: U_b(:,:), V_b(:,:), U_c(:,:), V_c(:,:) - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - + ! 
still to be understood but if you allocate these arrays statically the results will be different: + real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c, U_b, V_b +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv( :,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) + U_b => dynamics%work%u_b(:,:) + V_b => dynamics%work%v_b(:,:) + + !___________________________________________________________________________ ! An analog of harmonic viscosity operator. ! Same as visc_filt_h, but with the backscatter. ! Here the contribution from squared velocities is added to the viscosity. - ! The contribution from boundary edges is neglected (free slip). + ! The contribution from boundary edges is neglected (free slip). +!$OMP PARALLEL DO + DO elem=1, myDim_elem2D+eDim_elem2D + U_b(:, elem) = 0.0_WP + V_b(:, elem) = 0.0_WP + U_c(:, elem) = 0.0_WP + V_c(:, elem) = 0.0_WP + END DO +!$OMP END PARALLEL DO - ed=myDim_elem2D+eDim_elem2D - allocate(U_b(nl-1,ed), V_b(nl-1, ed)) - ed=myDim_nod2D+eDim_nod2D - allocate(U_c(nl-1,ed), V_c(nl-1,ed)) - U_b=0.0_WP - V_b=0.0_WP - U_c=0.0_WP - V_c=0.0_WP +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(u1, v1, len, vi, nz, ed, el, nelem, k, elem, nzmin, nzmax, update_u, update_v) +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) len=sqrt(sum(elem_area(el))) nzmax = minval(nlevels(el)) nzmin = maxval(ulevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 DO nz=nzmin,nzmax-1 u1=UV(1,nz,el(1))-UV(1,nz,el(2)) v1=UV(2,nz,el(1))-UV(2,nz,el(2)) - vi=dt*max(gamma0, max(gamma1*sqrt(u1*u1+v1*v1), gamma2*(u1*u1+v1*v1)))*len -! 
vi=dt*max(gamma0, gamma1*max(sqrt(u1*u1+v1*v1), gamma2*(u1*u1+v1*v1)))*len - !here gamma2 is dimensional (1/velocity). If it is 10, then the respective term dominates starting from |u|=0.1 m/s an so on. - u1=u1*vi - v1=v1*vi - U_b(nz,el(1))=U_b(nz,el(1))-u1/elem_area(el(1)) - U_b(nz,el(2))=U_b(nz,el(2))+u1/elem_area(el(2)) - V_b(nz,el(1))=V_b(nz,el(1))-v1/elem_area(el(1)) - V_b(nz,el(2))=V_b(nz,el(2))+v1/elem_area(el(2)) + vi=dt*max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(u1*u1+v1*v1), & + dynamics%visc_gamma2*(u1*u1+v1*v1)) & + )*len + update_u(nz)=u1*vi + update_v(nz)=v1*vi END DO +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(el(1))) +#else +!$OMP ORDERED +#endif + U_b(nzmin:nzmax-1, el(1))=U_b(nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1)/elem_area(el(1)) + V_b(nzmin:nzmax-1, el(1))=V_b(nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1)/elem_area(el(1)) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock (partit%plock(el(2))) +#endif + U_b(nzmin:nzmax-1, el(2))=U_b(nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) + V_b(nzmin:nzmax-1, el(2))=V_b(nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1)/elem_area(el(2)) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(el(2))) +#else +!$OMP END ORDERED +#endif END DO - call exchange_elem(U_b) - call exchange_elem(V_b) +!$OMP END DO +!$OMP MASTER + call exchange_elem(U_b, partit) + call exchange_elem(V_b, partit) +!$OMP END MASTER +!$OMP BARRIER ! =========== ! Compute smoothed viscous term: ! 
=========== +!$OMP DO DO ed=1, myDim_nod2D nzmin = ulevels_nod2D(ed) nzmax = nlevels_nod2D(ed) - !!PS DO nz=1, nlevels_nod2D(ed)-1 DO nz=nzmin, nzmax-1 vi=0.0_WP u1=0.0_WP @@ -633,522 +325,290 @@ SUBROUTINE visc_filt_bcksct(mesh) V_c(nz,ed)=v1/vi END DO END DO - call exchange_nod(U_c) - call exchange_nod(V_c) +!$OMP END DO +!$OMP MASTER + call exchange_nod(U_c, V_c, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO do ed=1, myDim_elem2D nelem=elem2D_nodes(:,ed) nzmin = ulevels(ed) nzmax = nlevels(ed) - !!PS Do nz=1, nlevels(ed)-1 Do nz=nzmin, nzmax-1 - UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+U_b(nz,ed) -easy_bs_return*sum(U_c(nz,nelem))/3.0_WP - UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -easy_bs_return*sum(V_c(nz,nelem))/3.0_WP + UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+U_b(nz,ed) -dynamics%visc_easybsreturn*sum(U_c(nz,nelem))/3.0_WP + UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+V_b(nz,ed) -dynamics%visc_easybsreturn*sum(V_c(nz,nelem))/3.0_WP END DO end do - deallocate(V_c,U_c,V_b,U_b) +!$OMP END DO +!$OMP END PARALLEL end subroutine visc_filt_bcksct - -! =================================================================== +! +! +!_______________________________________________________________________________ ! Strictly energy dissipative and momentum conserving version ! Viscosity depends on velocity Laplacian, i.e., on an analog of ! the Leith viscosity (Lapl==second derivatives) ! \nu=|3u_c-u_n1-u_n2-u_n3|*sqrt(S_c)/100. There is an additional term ! in viscosity that is proportional to the velocity amplitude squared. ! The coefficient has to be selected experimentally. 
-SUBROUTINE visc_filt_bilapl(mesh) +SUBROUTINE visc_filt_bilapl(dynamics, partit, mesh) USE MOD_MESH - USE o_ARRAYS + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN USE o_PARAM - USE g_PARSUP USE g_CONFIG USE g_comm_auto IMPLICIT NONE + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ real(kind=8) :: u1, v1, vi, len - integer :: ed, el(2), nz, nzmin, nzmax - real(kind=8), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" -! - ed=myDim_elem2D+eDim_elem2D - allocate(U_c(nl-1,ed), V_c(nl-1, ed)) - U_c=0.0_WP - V_c=0.0_WP + integer :: ed, el(2), elem, nz, nzmin, nzmax + real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) + + !___________________________________________________________________________ +!$OMP PARALLEL DO + DO elem=1, myDim_elem2D+eDim_elem2D + U_c(:, elem) = 0.0_WP + V_c(:, elem) = 0.0_WP + END DO +!$OMP END PARALLEL DO + +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(u1, v1, len, vi, ed, el, nz, nzmin, nzmax) +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 DO nz=nzmin,nzmax-1 - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - U_c(nz,el(1))=U_c(nz,el(1))-u1 - 
U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO + update_u(nz)=(UV(1,nz,el(1))-UV(1,nz,el(2))) + update_v(nz)=(UV(2,nz,el(1))-UV(2,nz,el(2))) + END DO +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(el(1))) +#else +!$OMP ORDERED +#endif + U_c(nzmin:nzmax-1, el(1))=U_c(nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1) + V_c(nzmin:nzmax-1, el(1))=V_c(nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock (partit%plock(el(2))) +#endif + U_c(nzmin:nzmax-1, el(2))=U_c(nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1) + V_c(nzmin:nzmax-1, el(2))=V_c(nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(el(2))) +#else +!$OMP END ORDERED +#endif END DO - - Do ed=1,myDim_elem2D +!$OMP END DO + +!$OMP DO + DO ed=1,myDim_elem2D len=sqrt(elem_area(ed)) nzmin = ulevels(ed) nzmax = nlevels(ed) - !!PS Do nz=1,nlevels(ed)-1 Do nz=nzmin,nzmax-1 ! vi has the sense of harmonic viscosity coef. because of ! 
division by area in the end u1=U_c(nz,ed)**2+V_c(nz,ed)**2 - vi=max(gamma0, max(gamma1*sqrt(u1), gamma2*u1))*len*dt + vi=max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(u1), & + dynamics%visc_gamma2*u1) & + )*len*dt U_c(nz,ed)=-U_c(nz,ed)*vi V_c(nz,ed)=-V_c(nz,ed)*vi END DO - end do - - call exchange_elem(U_c) - call exchange_elem(V_c) + END DO +!$OMP END DO +!$OMP MASTER + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 DO nz=nzmin,nzmax-1 - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) + update_u(nz)=(U_c(nz,el(1))-U_c(nz,el(2))) + update_v(nz)=(V_c(nz,el(1))-V_c(nz,el(2))) END DO - END DO - deallocate(V_c,U_c) - +#if defined(_OPENMP) && ! 
defined(__openmp_reproducible) + call omp_set_lock(partit%plock(el(1))) +#else +!$OMP ORDERED +#endif + UV_rhs(1, nzmin:nzmax-1, el(1))=UV_rhs(1, nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1)/elem_area(el(1)) + UV_rhs(2, nzmin:nzmax-1, el(1))=UV_rhs(2, nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1)/elem_area(el(1)) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock (partit%plock(el(2))) +#endif + UV_rhs(1, nzmin:nzmax-1, el(2))=UV_rhs(1, nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) + UV_rhs(2, nzmin:nzmax-1, el(2))=UV_rhs(2, nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1)/elem_area(el(2)) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(el(2))) +#else +!$OMP END ORDERED +#endif + END DO +!$OMP END DO +!$OMP END PARALLEL end subroutine visc_filt_bilapl -! =================================================================== +! +! +!_______________________________________________________________________________ ! Strictly energy dissipative and momentum conserving version ! Viscosity depends on velocity differences, and is introduced symmetrically ! into both stages of biharmonic operator ! On each edge, \nu=sqrt(|u_c1-u_c2|*sqrt(S_c1+S_c2)/100) ! The effect is \nu^2 ! Quadratic in velocity term can be introduced if needed. -SUBROUTINE visc_filt_bidiff(mesh) +SUBROUTINE visc_filt_bidiff(dynamics, partit, mesh) USE MOD_MESH - USE o_MESH - USE o_ARRAYS + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN USE o_PARAM - USE g_PARSUP USE g_CONFIG USE g_comm_auto IMPLICIT NONE - real(kind=8) :: u1, v1, vi, len - integer :: ed, el(2), nz, nzmin, nzmax - real(kind=8), allocatable :: U_c(:,:), V_c(:,:) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" - - ! 
- ed=myDim_elem2D+eDim_elem2D - allocate(U_c(nl-1,ed), V_c(nl-1, ed)) - U_c=0.0_WP - V_c=0.0_WP + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + real(kind=8) :: u1, v1, len, vi + integer :: ed, el(2), nz, nzmin, nzmax, elem + real(kind=8) :: update_u(mesh%nl-1), update_v(mesh%nl-1) + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV, UV_rhs + real(kind=WP), dimension(:,:) , pointer :: U_c, V_c +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + UV_rhs => dynamics%uv_rhs(:,:,:) + U_c => dynamics%work%u_c(:,:) + V_c => dynamics%work%v_c(:,:) + + !___________________________________________________________________________ +!$OMP PARALLEL DO + DO elem=1, myDim_elem2D+eDim_elem2D + U_c(:, elem) = 0.0_WP + V_c(:, elem) = 0.0_WP + END DO +!$OMP END PARALLEL DO +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(u1, v1, len, vi, ed, el, nz, nzmin, nzmax) +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) len=sqrt(sum(elem_area(el))) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 DO nz=nzmin,nzmax-1 u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) vi=u1*u1+v1*v1 - vi=sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) - ! vi=sqrt(max(gamma0, gamma1*max(sqrt(vi), gamma2*vi))*len) - u1=u1*vi - v1=v1*vi - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO + vi=sqrt(max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(vi), & + dynamics%visc_gamma2*vi) & + )*len) + ! 
vi=sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) + update_u(nz)=u1*vi + update_v(nz)=v1*vi + END DO +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(el(1))) +#else +!$OMP ORDERED +#endif + U_c(nzmin:nzmax-1, el(1))=U_c(nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1) + V_c(nzmin:nzmax-1, el(1))=V_c(nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock (partit%plock(el(2))) +#endif + U_c(nzmin:nzmax-1, el(2))=U_c(nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1) + V_c(nzmin:nzmax-1, el(2))=V_c(nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(el(2))) +#else +!$OMP END ORDERED +#endif + END DO - - call exchange_elem(U_c) - call exchange_elem(V_c) +!$OMP END DO +!$OMP MASTER + call exchange_elem(U_c, partit) + call exchange_elem(V_c, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO DO ed=1, myDim_edge2D+eDim_edge2D if(myList_edge2D(ed)>edge2D_in) cycle el=edge_tri(:,ed) len=sqrt(sum(elem_area(el))) nzmin = maxval(ulevels(el)) nzmax = minval(nlevels(el)) - !!PS DO nz=1,minval(nlevels(el))-1 DO nz=nzmin,nzmax-1 u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) vi=u1*u1+v1*v1 - vi=-dt*sqrt(max(gamma0, max(gamma1*sqrt(vi), gamma2*vi))*len) - ! vi=-dt*sqrt(max(gamma0, gamma1*max(sqrt(vi), gamma2*vi))*len) - u1=vi*(U_c(nz,el(1))-U_c(nz,el(2))) - v1=vi*(V_c(nz,el(1))-V_c(nz,el(2))) - UV_rhs(1,nz,el(1))=UV_rhs(1,nz,el(1))-u1/elem_area(el(1)) - UV_rhs(1,nz,el(2))=UV_rhs(1,nz,el(2))+u1/elem_area(el(2)) - UV_rhs(2,nz,el(1))=UV_rhs(2,nz,el(1))-v1/elem_area(el(1)) - UV_rhs(2,nz,el(2))=UV_rhs(2,nz,el(2))+v1/elem_area(el(2)) - END DO - END DO - deallocate(V_c, U_c) - -end subroutine visc_filt_bidiff -! =================================================================== - - -! 
=================================================================== -SUBROUTINE visc_filt_dbcksc(mesh) -USE MOD_MESH -USE o_ARRAYS -USE o_PARAM -USE g_PARSUP -USE g_CONFIG -USE g_comm_auto -USE g_support -USE uke_update_interface -IMPLICIT NONE - -real(kind=8) :: u1, v1, le(2), len, crosslen, vi, uke1 -integer :: nz, ed, el(2) -real(kind=8), allocatable :: U_c(:,:), V_c(:,:), UV_back(:,:,:), UV_dis(:,:,:), uke_d(:,:) -real(kind=8), allocatable :: uuu(:) -type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" - ! An analog of harmonic viscosity operator. - ! It adds to the rhs(0) Visc*(u1+u2+u3-3*u0)/area - ! on triangles, which is Visc*Laplacian/4 on equilateral triangles. - ! The contribution from boundary edges is neglected (free slip). - ! Filter is applied twice. - -ed=myDim_elem2D+eDim_elem2D -allocate(U_c(nl-1,ed), V_c(nl-1, ed)) -allocate(UV_back(2,nl-1,ed), UV_dis(2,nl-1, ed)) -allocate(uke_d(nl-1,ed)) -allocate(uuu(ed)) - - U_c=0.0_8 - V_c=0.0_8 - UV_back=0.0_8 - UV_dis=0.0_8 - uke_d=0.0_8 - - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - DO nz=1,minval(nlevels(el))-1 - u1=(UV(1,nz,el(1))-UV(1,nz,el(2))) - v1=(UV(2,nz,el(1))-UV(2,nz,el(2))) - - U_c(nz,el(1))=U_c(nz,el(1))-u1 - U_c(nz,el(2))=U_c(nz,el(2))+u1 - V_c(nz,el(1))=V_c(nz,el(1))-v1 - V_c(nz,el(2))=V_c(nz,el(2))+v1 - END DO - END DO - - - Do ed=1,myDim_elem2D - len=sqrt(elem_area(ed)) - len=dt*len/30.0_8 - Do nz=1,nlevels(ed)-1 - ! vi has the sense of harmonic viscosity coefficient because of - ! the division by area in the end - ! ==== - ! Case 1 -- an analog to the third-order upwind (vi=|u|l/12) - ! 
==== - vi=max(0.2_8,sqrt(UV(1,nz,ed)**2+UV(2,nz,ed)**2))*len - U_c(nz,ed)=-U_c(nz,ed)*vi - V_c(nz,ed)=-V_c(nz,ed)*vi - END DO - end do - - - call exchange_elem(U_c) - call exchange_elem(V_c) - - DO ed=1, myDim_edge2D+eDim_edge2D - if(myList_edge2D(ed)>edge2D_in) cycle - el=edge_tri(:,ed) - le=edge_dxdy(:,ed) - le(1)=le(1)*sum(elem_cos(el))*0.25_8 - len=sqrt(le(1)**2+le(2)**2)*r_earth - le(1)=edge_cross_dxdy(1,ed)-edge_cross_dxdy(3,ed) - le(2)=edge_cross_dxdy(2,ed)-edge_cross_dxdy(4,ed) - crosslen=sqrt(le(1)**2+le(2)**2) - DO nz=1,minval(nlevels(el))-1 - vi=dt*len*(v_back(nz,el(1))+v_back(nz,el(2)))/crosslen - !if(mype==0) write(*,*) 'vi ', vi , ' and ed' , ed - !if(mype==0) write(*,*) 'dt*len/crosslen ', dt*len/crosslen, ' and ed' , ed - !vi=max(vi,0.005*len*dt) ! This helps to reduce noise in places where - ! Visc is small and decoupling might happen - !Backscatter contribution - u1=(UV(1,nz,el(1))-UV(1,nz,el(2)))*vi - v1=(UV(2,nz,el(1))-UV(2,nz,el(2)))*vi - - !UKE diffusion - vi=dt*len*(K_back*sqrt(elem_area(el(1))/scale_area)+K_back*sqrt(elem_area(el(2))/scale_area))/crosslen - - uke1=(uke(nz,el(1))-uke(nz,el(2)))*vi - - - UV_back(1,nz,el(1))=UV_back(1,nz,el(1))-u1/elem_area(el(1)) - UV_back(1,nz,el(2))=UV_back(1,nz,el(2))+u1/elem_area(el(2)) - UV_back(2,nz,el(1))=UV_back(2,nz,el(1))-v1/elem_area(el(1)) - UV_back(2,nz,el(2))=UV_back(2,nz,el(2))+v1/elem_area(el(2)) - - !Correct scaling for the diffusion? 
- uke_d(nz,el(1))=uke_d(nz,el(1))-uke1/elem_area(el(1)) - uke_d(nz,el(2))=uke_d(nz,el(2))+uke1/elem_area(el(2)) - - - - !Biharmonic contribution - u1=(U_c(nz,el(1))-U_c(nz,el(2))) - v1=(V_c(nz,el(1))-V_c(nz,el(2))) - - UV_dis(1,nz,el(1))=UV_dis(1,nz,el(1))-u1/elem_area(el(1)) - UV_dis(1,nz,el(2))=UV_dis(1,nz,el(2))+u1/elem_area(el(2)) - UV_dis(2,nz,el(1))=UV_dis(2,nz,el(1))-v1/elem_area(el(1)) - UV_dis(2,nz,el(2))=UV_dis(2,nz,el(2))+v1/elem_area(el(2)) - - END DO - END DO - -call exchange_elem(UV_back) - -DO nz=1, nl-1 - uuu=0.0_8 - uuu=UV_back(1,nz,:) - call smooth_elem(uuu,smooth_back_tend, mesh) - UV_back(1,nz,:)=uuu - uuu=0.0_8 - uuu=UV_back(2,nz,:) - call smooth_elem(uuu,smooth_back_tend, mesh) - UV_back(2,nz,:)=uuu -END DO - - DO ed=1, myDim_elem2D - DO nz=1,nlevels(ed)-1 - UV_rhs(1,nz,ed)=UV_rhs(1,nz,ed)+UV_dis(1,nz,ed)+UV_back(1,nz,ed) - UV_rhs(2,nz,ed)=UV_rhs(2,nz,ed)+UV_dis(2,nz,ed)+UV_back(2,nz,ed) - END DO - END DO - - UV_dis_tend=UV_dis!+UV_back - UV_total_tend=UV_dis+UV_back - UV_back_tend=UV_back - uke_dif=uke_d - - call uke_update(mesh) - deallocate(V_c,U_c) - deallocate(UV_dis,UV_back) - deallocate(uke_d) - deallocate(uuu) - -end subroutine visc_filt_dbcksc -!=========================================================================== - -SUBROUTINE backscatter_coef(mesh) -USE MOD_MESH -USE o_ARRAYS -USE o_PARAM -USE g_PARSUP -USE g_CONFIG -use g_comm_auto -IMPLICIT NONE -type(t_mesh), intent(in) , target :: mesh -integer :: elem, nz -#include "associate_mesh.h" - -!Potentially add the Rossby number scaling to the script... -!check if sign is right! 
Different in the Jansen paper -!Also check with the normalization by area; as before we use element length sqrt(2*elem_area(ed)) - -v_back=0.0_8 -DO elem=1, myDim_elem2D - DO nz=1,nlevels(elem)-1 -!v_back(1,ed)=c_back*sqrt(2.0_WP*elem_area(ed))*sqrt(max(2.0_WP*uke(1,ed),0.0_WP))*(3600.0_WP*24.0_WP/tau_c)*4.0_WP/sqrt(2.0_WP*elem_area(ed))**2 !*sqrt(max(2.0_WP*uke(1,ed),0.0_WP)) -!v_back(nz,elem)=-c_back*sqrt(4._8/sqrt(3.0_8)*elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)) !Is the scaling correct -v_back(nz,elem)=min(-c_back*sqrt(elem_area(elem))*sqrt(max(2.0_8*uke(nz,elem),0.0_8)),0.2*elem_area(elem)/dt) !Is the scaling correct -!Scaling by sqrt(2*elem_area) or sqrt(elem_area)? - END DO -END DO - -call exchange_elem(v_back) - -end subroutine backscatter_coef -!=========================================================================== - -SUBROUTINE uke_update(mesh) -USE MOD_MESH -USE o_ARRAYS -USE o_PARAM -USE g_PARSUP -USE g_CONFIG -use g_comm_auto -USE g_support -USE g_rotate_grid -IMPLICIT NONE - -!I had to change uke(:) to uke(:,:) to make output and restart work!! - -!Why is it necessary to implement the length of the array? It doesn't work without! -!integer, intent(in) :: t_levels -type(t_mesh), intent(in) , target :: mesh -!real(kind=8), dimension(:,:,:), intent(in) :: UV_dis, UV_back -!real(kind=8), dimension(:,:), intent(in) :: UV_dif -!real(kind=8), intent(in) :: UV_dis(nl-1,myDim_elem2D+eDim_elem2D), UV_back(nl-1, myDim_elem2D+eDim_elem2D) -!real(kind=8), intent(in) :: UV_dif(nl-1,myDim_elem2D+eDim_elem2D) -real(kind=8) :: hall, h1_eta, hnz, vol -integer :: elnodes(3), nz, ed, edi, node, j, elem, q -real(kind=8), allocatable :: uuu(:), work_array(:), U_work(:,:), V_work(:,:), rosb_array(:,:), work_uv(:) -integer :: kk, nzmax, el -real(kind=8) :: c1, rosb, vel_u, vel_v, vel_uv, scaling, reso -real*8 :: c_min=0.5, f_min=1.e-6, r_max=200000., ex, ey, a1, a2, len_reg, dist_reg(2) ! Are those values still correct? 
-#include "associate_mesh.h" -!rosb_dis=1._8 !Should be variable to control how much of the dissipated energy is backscattered -!rossby_num=2 - -ed=myDim_elem2D+eDim_elem2D -allocate(uuu(ed)) - -uke_back=0.0_8 -uke_dis=0.0_8 -DO ed=1, myDim_elem2D -DO nz=1, nlevels(ed)-1 - uke_dis(nz,ed)=(UV(1,nz,ed)*UV_dis_tend(1,nz,ed)+UV(2,nz,ed)*UV_dis_tend(2,nz,ed)) - uke_back(nz,ed)=(UV(1,nz,ed)*UV_back_tend(1,nz,ed)+UV(2,nz,ed)*UV_back_tend(2,nz,ed)) -END DO -END DO - -DO nz=1,nl-1 - uuu=0.0_8 - uuu=uke_back(nz,:) - call smooth_elem(uuu,smooth_back, mesh) !3) ? - uke_back(nz,:)=uuu -END DO - - - -!Timestepping use simple backward timestepping; all components should have dt in it, unless they need it twice -!Amplitudes should be right given the correction of the viscosities; check for all, also for biharmonic -!uke(1,ed)=uke(1,ed)-uke_dis(1,ed)-uke_back(1,ed)+uke_dif(1,ed) -ed=myDim_elem2D+eDim_elem2D -allocate(U_work(nl-1,myDim_nod2D+eDim_nod2D),V_work(nl-1,myDim_nod2D+eDim_nod2D)) -allocate(work_uv(myDim_nod2D+eDim_nod2D)) -allocate(rosb_array(nl-1,ed)) -call exchange_elem(UV) -rosb_array=0._8 -DO nz=1, nl-1 - work_uv=0._WP - DO node=1, myDim_nod2D - vol=0._WP - U_work(nz,node)=0._WP - V_work(nz,node)=0._WP - DO j=1, nod_in_elem2D_num(node) - elem=nod_in_elem2D(j, node) - U_work(nz,node)=U_work(nz,node)+UV(1,nz,elem)*elem_area(elem) - V_work(nz,node)=V_work(nz,node)+UV(2,nz,elem)*elem_area(elem) - vol=vol+elem_area(elem) - END DO - U_work(nz,node)=U_work(nz,node)/vol - V_work(nz,node)=U_work(nz,node)/vol - END DO - work_uv=U_work(nz,:) - call exchange_nod(work_uv) - U_work(nz,:)=work_uv - work_uv=V_work(nz,:) - call exchange_nod(work_uv) - V_work(nz,:)=work_uv -END DO - - DO el=1,myDim_elem2D - DO nz=1, nlevels(el)-1 - rosb_array(nz,el)=sqrt((sum(gradient_sca(1:3,el)*U_work(nz,elem2D_nodes(1:3,el)))-& - sum(gradient_sca(4:6, el)*V_work(nz,elem2D_nodes(1:3,el))))**2+& - (sum(gradient_sca(4:6, el)*U_work(nz,elem2D_nodes(1:3,el)))+& - sum(gradient_sca(1:3, 
el)*V_work(nz,elem2D_nodes(1:3,el))))**2) -! hall=hall+hnz - END DO -! rosb_array(el)=rosb_array(el)/hall - END DO -DO ed=1, myDim_elem2D - scaling=1._WP - IF(uke_scaling) then - reso=sqrt(elem_area(ed)*4._wp/sqrt(3._wp)) - rosb=0._wp - elnodes=elem2D_nodes(:, ed) - DO kk=1,3 - c1=0._wp - nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(elnodes(kk)), elnodes(kk))), 1) - !Vertical average; same scaling in the vertical - DO nz=1, nzmax-1 - c1=c1+hnode_new(nz,elnodes(kk))*(sqrt(max(bvfreq(nz,elnodes(kk)), 0._WP))+sqrt(max(bvfreq(nz+1,elnodes(kk)), 0._WP)))/2. + vi=-dt*sqrt(max(dynamics%visc_gamma0, & + max(dynamics%visc_gamma1*sqrt(vi), & + dynamics%visc_gamma2*vi) & + )*len) + ! vi=-dt*sqrt(max(dynamics%visc_gamma0, dynamics%visc_gamma1*max(sqrt(vi), dynamics%visc_gamma2*vi))*len) + update_u(nz)=vi*(U_c(nz,el(1))-U_c(nz,el(2))) + update_v(nz)=vi*(V_c(nz,el(1))-V_c(nz,el(2))) END DO - c1=max(c_min, c1/pi) !ca. first baroclinic gravity wave speed limited from below by c_min - !Cutoff K_GM depending on (Resolution/Rossby radius) ratio - rosb=rosb+min(c1/max(abs(coriolis_node(elnodes(kk))), f_min), r_max) - END DO - rosb=rosb/3._8 - scaling=1._WP/(1._WP+(uke_scaling_factor*reso/rosb))!(4._wp*reso/rosb)) - END IF - - DO nz=1, nlevels(ed)-1 - elnodes=elem2D_nodes(:,ed) - - !Taking out that one place where it is always weird (Pacific Southern Ocean) - !Should not really be used later on, once we fix the issue with the 1/4 degree grid - if(.not. 
(TRIM(which_toy)=="soufflet")) then - call elem_center(ed, ex, ey) - !a1=-104.*rad - !a2=-49.*rad - call g2r(-104.*rad, -49.*rad, a1, a2) - dist_reg(1)=ex-a1 - dist_reg(2)=ey-a2 - call trim_cyclic(dist_reg(1)) - dist_reg(1)=dist_reg(1)*elem_cos(ed) - dist_reg=dist_reg*r_earth - len_reg=sqrt(dist_reg(1)**2+dist_reg(2)**2) - - - !if(mype==0) write(*,*) 'len_reg ', len_reg , ' and dist_reg' , dist_reg, ' and ex, ey', ex, ey, ' and a ', a1, a2 - rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) - !uke_dif(nz, ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) - uke_dis(nz,ed)=scaling*(1-exp(-len_reg/300000))*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) - else - rosb_array(nz,ed)=rosb_array(nz,ed)/max(abs(sum(coriolis_node(elnodes(:)))), f_min) - !uke_dif(nz, ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)!UV_dif(1,ed) - uke_dis(nz,ed)=scaling*1._8/(1._8+rosb_array(nz,ed)/rosb_dis)*uke_dis(nz,ed) - end if - +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(el(1))) +#else +!$OMP ORDERED +#endif + UV_rhs(1, nzmin:nzmax-1, el(1))=UV_rhs(1, nzmin:nzmax-1, el(1))-update_u(nzmin:nzmax-1)/elem_area(el(1)) + UV_rhs(2, nzmin:nzmax-1, el(1))=UV_rhs(2, nzmin:nzmax-1, el(1))-update_v(nzmin:nzmax-1)/elem_area(el(1)) + +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(el(1))) + call omp_set_lock (partit%plock(el(2))) +#endif + + UV_rhs(1, nzmin:nzmax-1, el(2))=UV_rhs(1, nzmin:nzmax-1, el(2))+update_u(nzmin:nzmax-1)/elem_area(el(2)) + UV_rhs(2, nzmin:nzmax-1, el(2))=UV_rhs(2, nzmin:nzmax-1, el(2))+update_v(nzmin:nzmax-1)/elem_area(el(2)) + +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(el(2))) +#else +!$OMP END ORDERED +#endif END DO -END DO -deallocate(U_work, V_work) -deallocate(rosb_array) -deallocate(work_uv) -call exchange_elem(uke_dis) -!call exchange_elem(uke_dif) -DO 
nz=1, nl-1 - uuu=uke_dis(nz,:) - call smooth_elem(uuu,smooth_dis, mesh) - uke_dis(nz,:)=uuu -END DO -DO ed=1, myDim_elem2D - DO nz=1,nlevels(ed)-1 - uke_rhs_old(nz,ed)=uke_rhs(nz,ed) - uke_rhs(nz,ed)=-uke_dis(nz,ed)-uke_back(nz,ed)+uke_dif(nz,ed) - uke(nz,ed)=uke(nz,ed)+1.5_8*uke_rhs(nz,ed)-0.5_8*uke_rhs_old(nz,ed) - END DO -END DO -call exchange_elem(uke) - -deallocate(uuu) -end subroutine uke_update +!$OMP END DO +!$OMP END PARALLEL +end subroutine visc_filt_bidiff -! =================================================================== diff --git a/src/oce_fer_gm.F90 b/src/oce_fer_gm.F90 index 3f95cd951..f450be553 100644 --- a/src/oce_fer_gm.F90 +++ b/src/oce_fer_gm.F90 @@ -1,3 +1,35 @@ +module fer_solve_interface + interface + subroutine fer_solve_Gamma(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + + subroutine fer_gamma2vel(dynamics, partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + end subroutine + + subroutine init_Redi_GM(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module + + + !--------------------------------------------------------------------------- !Implementation of Gent & McWiliams parameterization after R. Ferrari et al., 2010 !Contains: @@ -5,23 +37,32 @@ ! fer_gamma2vel ! fer_compute_C_K ! 
this subroutine shall be a subject of future tuning (with respect to fer_k) !=========================================================================== -subroutine fer_solve_Gamma(mesh) +subroutine fer_solve_Gamma(partit, mesh) USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP USE o_PARAM - USE o_ARRAYS, ONLY: sigma_xy, fer_gamma, bvfreq, fer_c, fer_K, zbar_n, Z_n, hnode_new, zbar_n_bot - USE g_PARSUP + USE o_ARRAYS, ONLY: sigma_xy, fer_gamma, bvfreq, fer_c, fer_K USE g_CONFIG use g_comm_auto IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh integer :: nz, n, nzmax, nzmin real(kind=WP) :: zinv1,zinv2, zinv, m, r real(kind=WP) :: a(mesh%nl), b(mesh%nl), c(mesh%nl) real(kind=WP) :: cp(mesh%nl), tp(2,mesh%nl) + real(kind=WP) :: zbar_n(mesh%nl), z_n(mesh%nl-1) + real(kind=WP), dimension(:,:), pointer :: tr -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, zinv1,zinv2, zinv, m, r, a, b, c, cp, tp, tr, zbar_n, Z_n) +!$OMP DO DO n=1,myDim_nod2D tr=>fer_gamma(:,:,n) ! !_____________________________________________________________________ @@ -115,18 +156,22 @@ subroutine fer_solve_Gamma(mesh) tr(:,nz) = tp(:,nz)-cp(nz)*tr(:,nz+1) end do END DO !!! cycle over nodes - - call exchange_nod(fer_gamma) +!$OMP END DO +!$OMP END PARALLEL + call exchange_nod(fer_gamma, partit) +!$OMP BARRIER END subroutine fer_solve_Gamma ! ! ! 
!==================================================================== -subroutine fer_gamma2vel(mesh) +subroutine fer_gamma2vel(dynamics, partit, mesh) USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN USE o_PARAM - USE o_ARRAYS, ONLY: fer_gamma, fer_uv, helem - USE g_PARSUP + USE o_ARRAYS, ONLY: fer_gamma USE g_CONFIG use g_comm_auto IMPLICIT NONE @@ -134,10 +179,19 @@ subroutine fer_gamma2vel(mesh) integer :: nz, nzmax, el, elnod(3), nzmin real(kind=WP) :: zinv real(kind=WP) :: onethird=1._WP/3._WP - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in), target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: fer_UV + real(kind=WP), dimension(:,:) , pointer :: fer_Wvel +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + fer_UV =>dynamics%fer_uv(:,:,:) + fer_Wvel =>dynamics%fer_w(:,:) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(el, elnod, nz, nzmin, nzmax, zinv) DO el=1, myDim_elem2D elnod=elem2D_nodes(:,el) ! max. number of levels at element el @@ -150,21 +204,25 @@ subroutine fer_gamma2vel(mesh) fer_uv(2,nz,el)=sum(fer_gamma(2,nz,elnod)-fer_gamma(2,nz+1,elnod))*zinv END DO END DO - call exchange_elem(fer_uv) +!$OMP END PARALLEL DO + call exchange_elem(fer_uv, partit) +!$OMP BARRIER end subroutine fer_gamma2vel ! ! ! 
!=============================================================================== -subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi +subroutine init_Redi_GM(partit, mesh) !fer_compute_C_K_Redi USE MOD_MESH USE o_PARAM - USE o_ARRAYS, ONLY: fer_c, fer_k, fer_scal, Ki, bvfreq, MLD1_ind, neutral_slope, coriolis_node, hnode_new, Z_3d_n - USE g_PARSUP + USE o_ARRAYS, ONLY: fer_c, fer_k, fer_scal, Ki, bvfreq, MLD1_ind, neutral_slope + USE MOD_PARTIT + USE MOD_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: n, nz, nzmax, nzmin real(kind=WP) :: reso, c1, rosb, scaling, rr_ratio, aux_zz(mesh%nl) real(kind=WP) :: x0=1.5_WP, sigma=.15_WP ! Fermi function parameters to cut off GM where Rossby radius is resolved @@ -172,10 +230,15 @@ subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi real(kind=WP) :: zscaling(mesh%nl) real(kind=WP) :: bvref -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" ! fill arrays for 3D Redi and GM coefficients: F1(xy)*F2(z) !******************************* F1(x,y) *********************************** +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, nz, nzmax, nzmin, reso, c1, rosb, scaling, rr_ratio, aux_zz, zscaling, bvref) +!$OMP DO do n=1, myDim_nod2D nzmax=minval(nlevels(nod_in_elem2D(1:nod_in_elem2D_num(n), n)), 1) nzmin=maxval(ulevels(nod_in_elem2D(1:nod_in_elem2D_num(n), n)), 1) @@ -194,7 +257,7 @@ subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi !___________________________________________________________________ ! 
Cutoff K_GM depending on (Resolution/Rossby radius) ratio if (scaling_Rossby) then - rosb=min(c1/max(abs(coriolis_node(n)), f_min), r_max) + rosb=min(c1/max(abs(mesh%coriolis_node(n)), f_min), r_max) rr_ratio=min(reso/rosb, 5._WP) scaling=1._WP/(1._WP+exp(-(rr_ratio-x0)/sigma)) end if @@ -243,18 +306,22 @@ subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi Ki(nzmin,n)=K_hor*(reso/100000.0_WP)**2 end if end do - +!$OMP END DO + !Like in FESOM 1.4 we make Redi equal GM if (Redi .and. Fer_GM) then - !!PS Ki(1,:)=fer_k(1,:) - Ki(nzmin,:)=fer_k(nzmin,:) +!$OMP DO + do n=1, myDim_nod2D + Ki(nzmin, n)=fer_k(nzmin, n) + end do +!$OMP END DO end if !******************************* F2(z) (e.g. Ferreira et al., 2005) ********************************* !Ferreira, D., Marshall, J. and Heimbach, P.: Estimating Eddy Stresses by Fitting Dynamics to Observations Using a !Residual-Mean Ocean Circulation Model and Its Adjoint, Journal of Physical Oceanography, 35(10), 1891– !1910, doi:10.1175/jpo2785.1, 2005. - +!$OMP DO do n=1,myDim_nod2D nzmax=nlevels_nod2D(n) nzmin=ulevels_nod2D(n) @@ -311,7 +378,7 @@ subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi ! the surface template for the scaling !!PS do nz=2, nzmax do nz=nzmin+1, nzmax - fer_k(nz,n)=fer_k(1,n)*zscaling(nz) + fer_k(nz,n)=fer_k(nzmin,n)*zscaling(nz) end do ! 
after vertical Ferreira scaling is done also scale surface template !!PS fer_k(1,n)=fer_k(1,n)*zscaling(1) @@ -333,9 +400,11 @@ subroutine init_Redi_GM(mesh) !fer_compute_C_K_Redi Ki(nzmin,n)=Ki(nzmin,n)*0.5_WP*(zscaling(nzmin)+zscaling(nzmin+1)) end if end do - - if (Fer_GM) call exchange_nod(fer_c) - if (Fer_GM) call exchange_nod(fer_k) - if (Redi) call exchange_nod(Ki) +!$OMP END DO +!$OMP END PARALLEL + if (Fer_GM) call exchange_nod(fer_c, partit) + if (Fer_GM) call exchange_nod(fer_k, partit) + if (Redi) call exchange_nod(Ki, partit) +!$OMP BARRIER end subroutine init_Redi_GM !==================================================================== diff --git a/src/oce_ice_init_state.F90 b/src/oce_ice_init_state.F90 deleted file mode 100755 index ecb31fd31..000000000 --- a/src/oce_ice_init_state.F90 +++ /dev/null @@ -1,798 +0,0 @@ -!============================================================================== -! -! Simple initialization, forcing and output, just for tests -! for ocean and ice. -! ============================================================================ -! ============================================================================ -subroutine initial_state_test(mesh) - use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - ! - implicit none - integer :: elem, n, nz, elnodes(3) - integer :: elevation, strat, wind, cooling, tperturb - real(kind=WP) :: lon, lat, a, dst - real(kind=WP) :: minlat,maxlat,tt,rwidth - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - -! Now updated for the box mesh, it was originally designed for hex mesh. -! In that case, the southern boundary is 40, the northern 48.83, and 0:18 the -! longitudinal extent. - - - -! 
Default values - stress_surf=0.0 - tr_arr(:,:,1)=20.0_WP - Tsurf=tr_arr(1,:,1) - heat_flux=0.0_WP - tr_arr(:,:,2)=35.0_WP - Ssurf=tr_arr(1,:,2) - water_flux=0.0_WP - relax2clim=0.0 - - elevation=0 - strat=1 - wind=1 - cooling=0 - tperturb=0 - surf_relax_T=0 !10.0/10.0/24.0/3600. - surf_relax_S=0. - - - ! Stratification - if(strat==1) then - DO n=1, myDim_nod2D+eDim_nod2D - DO nz=1, nlevels_nod2D(n)-1 - ! tr_arr(nz,n,1)=tr_arr(nz,n,1)- 8.2e-3*abs(Z(nz)) - tr_arr(nz,n,1)=tr_arr(nz,n,1)-0.95_WP*20*tanh(abs(Z(nz))/300)-abs(Z(nz))/2400.0_WP - - END DO - END DO - end if - -Tsurf=tr_arr(1,:,1) - - if (tperturb==0) then - ! Temperature perturbation - do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - lon=coord_nod2D(1,n) - dst=sqrt((lat-37.5*rad)**2+(lon-4.5*rad)**2) - if (dst>1.5*rad) cycle - do nz=1, nlevels_nod2D(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)+0.1*exp(-(dst/(1.5*rad))**2)*sin(pi*abs(Z(nz))/1600) - end do - end do - end if - - - if (cooling==1) then - ! Surface cooling - do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - lon=coord_nod2D(1,n) - dst=sqrt((lat-37.5*rad)**2+(lon-4.5*rad)**2) - if (dst>3.7*rad) cycle - Tsurf(n)=Tsurf(n)-1*exp(-(dst/(2.2*rad))**2) - end do - end if - -#ifdef false - if (wind==1) then - DO elem=1, myDim_elem2D - elnodes=elem2d_nodes(:,elem) - lat=sum(coord_nod2D(2,elnodes))/3.0_WP - lon=sum(coord_nod2D(1,elnodes))/3.0_WP - stress_surf(1,elem)=-0.2 *cos(pi*(lat-30.0*rad)/(15.0*rad)) !(8.83*rad)) - ! 40 is the south boundary of the hex box - END DO - end if -#endif - - if (wind==1) then - DO elem=1, myDim_elem2D - elnodes=elem2d_nodes(:,elem) - lat=sum(coord_nod2D(2,elnodes))/3.0_WP - lon=sum(coord_nod2D(1,elnodes))/3.0_WP - !stress_surf(1,elem)=0.1 *cos(pi*(lat-40.0*rad)/(1500000.0/r_earth))* & - ! exp(-((lat-40.0*rad)/(1500000.0/r_earth))**2) - ! 
40 is the center of domain - stress_surf(1,elem)=0.1 *cos(pi*(lat-35.0*rad)/(1250000.0/r_earth))* & - exp(-((lat-35.0*rad)/(1250000.0/r_earth))**2)* & - (1.0_WP-0.5_WP*((lat-35.0*rad)/(1250000.0/r_earth))) - ! 35 is the center of domain - END DO - end if - - ! Fix for too low salinity - where (tr_arr(:,:,2)<20.4) tr_arr(:,:,2)=20.4 -end subroutine initial_state_test -! ==================================================================== - -subroutine initial_state_channel_test(mesh) - use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_CONFIG - ! - implicit none - integer :: elem, n, nz, elnodes(3) - integer :: strat, wind, elevation - real(kind=WP) :: lon, lat, a, dst - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! Default values - stress_surf=0.0 - tr_arr(:,:,1)=20.0_WP - Tsurf=tr_arr(1,:,1) - heat_flux=0.0_WP - tr_arr(:,:,2)=35.0_WP - Ssurf=tr_arr(1,:,2) - water_flux=0.0_WP - - strat=1 - wind=0 - elevation=0 - - lat=30.0*rad - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-lat - do nz=1, nlevels(n)-1 - tr_arr(nz,n,1)=25.-0.5e-5*r_earth*dst- 8.2e-3*abs(Z(nz)) -! tr_arr(nz,n,1)=(25.-0.5e-5*r_earth*dst)*exp(Z(nz)/800) - end do - end do - end if - - if (wind==1) then - DO elem=1, myDim_elem2D - call elem_center(elem, lon, lat, mesh) - stress_surf(1,elem)=-0.2 *cos(pi*(lat-30.0*rad)/(15*rad)) - ! 40 is the south boundary of the box - END DO - end if - - Tsurf=tr_arr(1,:,1) - Ssurf=tr_arr(1,:,2) - Tclim=tr_arr(:,:,1) - Sclim=tr_arr(:,:,2) - - ! small perturbation: - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-30.0*rad - do nz=1, nlevels(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)-0.2*sin(2*pi*dst/(15.0*rad))*sin(pi*Z(nz)/1500.0) & - *(sin(8*pi*coord_nod2D(1,n)/(20.0*rad))+ & - 0.5*sin(3*pi*coord_nod2D(1,n)/(20.0*rad))) - end do - end do - end if - - if(elevation==1) then - eta_n=0.01*(coord_nod2D(2,:)-30.0*rad)/(15.0*rad) - end if - - ! 
relaxation to climatology: - Do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - if(lat>43.5*rad) relax2clim(n)=clim_relax*(1.0-(45*rad-lat)/(1.5*rad)) - if(lat<31.5*rad) relax2clim(n)=clim_relax*(1.0+(30*rad-lat)/(1.5*rad)) - END DO - return - ! advection scheme tests - - dst=45.0*rad-30.0*rad; - DO n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n)-30.0*rad - lon=coord_nod2D(1,n) - eta_n(n)=(1000000./pi)*sin(pi*lat/dst)*sin(2*pi*lon/(20*rad)) - !eta_n(n)=(1000000./pi)*sin(pi*lat/dst)*sin(pi*lon/(10*rad)) - end do - - - Do n=1, myDim_elem2D - UV(1,:,n)=-sum(gradient_sca(4:6,n)*eta_n(elem2D_nodes(:,n))) - UV(2,:,n)=sum(gradient_sca(1:3,n)*eta_n(elem2D_nodes(:,n))) - END DO - - - !Do n=1, elem2D - !call elem_center(n, lon, lat, mesh) - !lat=lat-30.0*rad - !UV(1,:,n)=-(20*rad/dst)*0.1*cos(pi*lat/dst)*sin(2*pi*lon/(20*rad)) - !UV(2,:,n)= 0.2*sin(pi*lat/dst)*cos(2*pi*lon/(20*rad)) - !end do - relax2clim=0. - tr_arr(:,:,1)=20.0 - Tsurf=tr_arr(1,:,1) - surf_relax_T=0. - surf_relax_S=0. - !U_n=-0.3 - !V_n=0. -! Temperature perturbation - do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n)-32.5*rad - lon=coord_nod2D(1,n)-5.0*rad - if (lon>cyclic_length/2) lon=lon-cyclic_length - if (lon<-cyclic_length/2) lon=lon+cyclic_length - dst=sqrt((lat)**2+(lon)**2) - if (dst>1.5*rad) cycle - do nz=1, nlevels_nod2D(n)-1 - !if(abs(Z(nz)+500)<300) then - tr_arr(nz,n,1)=tr_arr(nz,n,1)+1.0*cos(pi*dst/2.0/1.5/rad) !exp(-(dst/(1.5*rad))**2) - !end if - end do - end do -end subroutine initial_state_channel_test -! ==================================================================== -subroutine initial_state_channel_narrow_test(mesh) - use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - use g_CONFIG - ! - implicit none - integer :: elem, n, nz, elnodes(3) - integer :: strat, wind, elevation - real(kind=WP) :: lon, lat, a, dst - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! 
Default values - stress_surf=0.0 - tr_arr(:,:,1)=20.0_WP - Tsurf=tr_arr(1,:,1) - heat_flux=0.0_WP - tr_arr(:,:,2)=35.0_WP - Ssurf=tr_arr(1,:,2) - water_flux=0.0_WP - - strat=1 - wind=0 - elevation=0 - - lat=30.0*rad - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-lat - do nz=1, nlevels_nod2D(n)-1 - tr_arr(nz,n,1)=25.-0.5e-5*r_earth*dst- 8.2e-3*abs(Z(nz)) -! tr_arr(nz,n,1)=(25.-0.5e-5*r_earth*dst)*exp(Z(nz)/800) - end do - end do - end if - - if (wind==1) then - DO elem=1, myDim_elem2D - call elem_center(elem, lon, lat, mesh) - stress_surf(1,elem)=-0.2 *cos(pi*(lat-30.0*rad)/(10*rad)) - ! 40 is the south boundary of the box - END DO - end if - - Tsurf=tr_arr(1,:,1) - Ssurf=tr_arr(1,:,2) - Tclim=tr_arr(:,:,1) - Sclim=tr_arr(:,:,2) - - ! small perturbation: - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-30.0*rad - do nz=1, nlevels(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)-0.1*sin(pi*dst/(10.0*rad))*sin(pi*Z(nz)/1600.0) & - *(sin(4*pi*coord_nod2D(1,n)/(10.0*rad))+0.5*sin(3*pi*coord_nod2D(1,n)/(10.0*rad))) - end do - end do - end if - - if(elevation==1) then - eta_n=0.01*(coord_nod2D(2,:)-30.0*rad)/(10.0*rad) - end if - - ! relaxation to climatology: - Do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - if(lat>38.5*rad) relax2clim(n)=clim_relax*(1.0-(40*rad-lat)/(1.5*rad)) - if(lat<31.5*rad) relax2clim(n)=clim_relax*(1.0+(30*rad-lat)/(1.5*rad)) - END DO -!T_rhsAB=tr_arr(:,:,1) in case upwind1 -!S_rhsAB=tr_arr(:,:,2) -! Advection experiments: -return - UV(1,:,:)=-0.3 - UV(2,:,:)=0. 
- - dst=maxval(coord_nod2D(2,:))-30.0*rad; - DO n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n)-30.0*rad - lon=coord_nod2D(1,n) - eta_n(n)=(1000000./pi)*sin(pi*lat/dst)*sin(2*pi*lon/(10*rad)) - !eta_n(n)=(1000000./pi)*sin(pi*lat/dst)*sin(pi*lon/(10*rad)) - end do - - - Do n=1, myDim_elem2D - UV(1,:,n)=-sum(gradient_sca(4:6,n)*eta_n(elem2D_nodes(:,n))) - UV(2,:,n)=sum(gradient_sca(1:3,n)*eta_n(elem2D_nodes(:,n))) - END DO - - - Do n=1, myDim_elem2D - call elem_center(n, lon, lat, mesh) - lat=lat-30.0*rad - UV(1,:,n)=-0.1*(dst/10.0/rad)*cos(pi*lat/dst)*sin(2*pi*lon/(10*rad)) - UV(2,:,n)= 0.2*sin(pi*lat/dst)*cos(2*pi*lon/(10*rad)) - end do - - - - relax2clim=0. - tr_arr(:,:,1)=20.0 - -! Temperature perturbation - do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n)-32.5*rad - lon=coord_nod2D(1,n)-5.0*rad - if (lon>cyclic_length/2) lon=lon-cyclic_length - if (lon<-cyclic_length/2) lon=lon+cyclic_length - dst=sqrt((lat)**2+(lon)**2) - if (dst>1.5*rad) cycle - do nz=1, nlevels_nod2D(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)+1.0*cos(pi*dst/2.0/1.5/rad) !exp(-(dst/(1.5*rad))**2) - end do - end do -end subroutine initial_state_channel_narrow_test -! ================================================================ -subroutine init_fields_na_test(mesh) - use MOD_MESH - use o_PARAM - use o_ARRAYS - use g_PARSUP - ! - implicit none - integer :: n, nz, nd - real(kind=WP) :: maxlat, minlat, rwidth, lat,lon - logical :: c_status - real(kind=WP) :: p0, ss, tt,pr - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - ! =================== - ! Fill the model fields with dummy values - ! =================== - - ! Default values - stress_surf=0.0 - tr_arr(:,:,1)=20.0_WP - Tsurf=tr_arr(1,:,1) - heat_flux=0.0_WP - tr_arr(:,:,2)=35.0_WP - Ssurf=tr_arr(1,:,2) - water_flux=0.0_WP - - ! =================== - ! Initialize T, S from files - ! =================== - - !call get_TS_mean('gur', c_status) - - ! =================== - ! If database contains in situ - ! 
temperature, transform it to - ! potential temperature - ! =================== - if(c_status) then - pr=0. - do n=1,myDim_nod2D+eDim_nod2D - DO nz=1,nlevels_nod2D(n)-1 - tt=tr_arr(nz,n,1) - ss=tr_arr(nz,n,2) - p0=abs(Z(nz)) - call ptheta(ss, tt, p0, pr, tr_arr(nz,n,1)) - END DO - end do - write(*,*) 'In situ temperature is converted to potential temperature' - end if - Tclim=tr_arr(:,:,1) - Sclim=tr_arr(:,:,2) - do n=1, myDim_nod2D+eDim_nod2D - Tsurf(n)=tr_arr(1,n,1) - Ssurf(n)=tr_arr(1,n,2) - end do - - ! ==================== - ! Specify where restoring to - ! climatology is applied - ! ==================== - ! relaxation to climatology: - maxlat=80.0*rad - minlat=-28.0*rad - rwidth=10.0*rad - Do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - if(lat>maxlat-rwidth) then - relax2clim(n)=clim_relax*(cos(pi*0.5*(maxlat-lat)/rwidth))**2 - end if - if(latmaxlat-rwidth) relax2clim(n)=clim_relax*(1.0-(maxlat-lat)/rwidth) - !if(lat15.0).and.(lon<40.0).and.(lat>30.0).and.(lat<40.0)) then - DO nz=1,nlevels_nod2D(n)-1 - if(tr_arr(nz,n,2)<38.0) tr_arr(nz,n,2)=38.0 - END DO - end if - end do -end subroutine init_fields_global_test -! ================================================================ -! ==================================================================== - -subroutine initial_state_channel_dima_test(mesh) - use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - ! - implicit none - integer :: elem, n, nz, elnodes(3) - integer :: strat, wind, elevation - real(kind=WP) :: lon, lat, a, dst - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - -! Default values - stress_surf=0.0 - tr_arr(:,:,1)=20.0_WP - Tsurf=tr_arr(1,:,1) - heat_flux=0.0_WP - tr_arr(:,:,2)=35.0_WP - Ssurf=tr_arr(1,:,2) - water_flux=0.0_WP - - strat=1 - wind=0 - elevation=0 - - lat=30.0*rad - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-lat - do nz=1, nlevels(n)-1 - tr_arr(nz,n,1)=25.-0.5e-5*r_earth*dst- 8.2e-3*abs(Z(nz)) -! 
tr_arr(nz,n,1)=(25.-0.5e-5*r_earth*dst)*exp(Z(nz)/800) - end do - end do - end if - - if (wind==1) then - DO elem=1, myDim_elem2D - call elem_center(elem, lon, lat, mesh) - stress_surf(1,elem)=-0.2 *cos(pi*(lat-30.0*rad)/(15*rad)) - ! 40 is the south boundary of the box - END DO - end if - - Tsurf=tr_arr(1,:,1) - Ssurf=tr_arr(1,:,2) - Tclim=tr_arr(:,:,1) - Sclim=tr_arr(:,:,2) - - ! small perturbation: - if (strat==1) then - do n=1, myDim_nod2D+eDim_nod2D - dst=coord_nod2D(2, n)-30.0*rad - do nz=1, nlevels(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)-0.1*sin(pi*dst/(15.0*rad))*sin(pi*Z(nz)/1500.0) & - *(sin(8*pi*coord_nod2D(1,n)/(40.0*rad))+sin(5*pi*coord_nod2D(1,n)/(40.0*rad))) - end do - end do - end if - - if(elevation==1) then - eta_n=0.01*(coord_nod2D(2,:)-30.0*rad)/(15.0*rad) - end if - - ! relaxation to climatology: - Do n=1, myDim_nod2D+eDim_nod2D - lat=coord_nod2D(2,n) - if(lat>43.5*rad) relax2clim(n)=clim_relax*(1.0-(45*rad-lat)/(1.5*rad)) - if(lat<31.5*rad) relax2clim(n)=clim_relax*(1.0+(30*rad-lat)/(1.5*rad)) - END DO -end subroutine initial_state_channel_dima_test -! ==================================================================== -subroutine ice_init_fields_test(mesh) -! -! Simple initialization for a box model to test the dynamical part. -! No thermodinamics is initialized here -! -use mod_mesh -use i_arrays -use i_param -use o_param -use g_PARSUP -use o_ARRAYS -use g_CONFIG -use g_comm_auto - -IMPLICIT NONE -real(kind=WP) :: xmin, xmax, ymin, ymax, Lx, Ly, meanf -integer :: n, elnodes(3) -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - - coriolis=1.4e-4 ! redefines Coriolis - coriolis_node=1.4e-4 - ! Set initial thickness and area coverage: - m_ice=2.0 - m_snow=0.0 - u_ice=0.0 - v_ice=0.0 - stress_atmice_x=0.0 - stress_atmice_y=0.0 - ! a_ice is defined later - - - ! 
Set ocean velocity (stationary in time): - xmin=0.0_WP*rad - xmax=20.0_WP*rad !10.0_WP*rad - ymin=30._WP*rad !30._WP*rad - ymax=45.0_WP*rad !40.0_WP*rad - Lx=xmax-xmin - Ly=ymax-ymin - - DO n=1, myDim_nod2D+eDim_nod2D - a_ice(n)=(coord_nod2d(1,n)-xmin)/Lx - END DO - - DO n=1, myDim_nod2D+eDim_nod2D - U_w(n)=0.1*(2*(coord_nod2d(2,n)-ymin)-Ly)/Ly - V_w(n)=-0.1*(2*(coord_nod2d(1,n)-xmin)-Lx)/Lx - END DO - m_ice=m_ice*a_ice - - ! Elevation computed approximately, from the geostrophy: - meanf= 1.4e-4*r_earth !2*omega*sin(yc)*r_earth - DO n=1, myDim_nod2d+eDim_nod2D - elevation(n)=-0.1*meanf/g *((coord_nod2d(2,n)-ymin)**2/Ly- & - (coord_nod2d(2,n)-ymin)+ & - (coord_nod2d(1,n)-xmin)**2/Lx -& - (coord_nod2d(1,n)-xmin)) - END DO -end subroutine ice_init_fields_test -! ============================================================================= -Subroutine ice_update_forcing_test(step, mesh) -! -! Here only simple wind variability is introduced -! -use mod_mesh -use i_arrays -use i_param -use o_param -use i_therm_param -use g_PARSUP -use g_forcing_arrays -USE g_CONFIG -IMPLICIT NONE -real(kind=WP) :: xmin, xm, ym, ymin, Lx, Ly, td, cdwin -integer :: step, n, elnodes(3) -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - cdwin=0.00225_WP - ! Set wind velocity (stationary in time): - xmin=0.0_WP*rad - Lx=20.0_WP*rad-xmin - ymin=30.0_WP*rad - Ly=45.0_WP*rad-ymin - td=4*3600*24.0_WP - - DO n=1, myDim_nod2D+eDim_nod2D - xm=coord_nod2d(1,n) - ym=coord_nod2d(2,n) - u_wind(n)=5.0+(sin(2*pi*step*dt/td)-3.0)*sin(2*pi*(xm-xmin)/Lx) & - *sin(pi*(ym-ymin)/Ly) - - v_wind(n)=5.0+(sin(2*pi*step*dt/td)-3.0)*sin(2*pi*(ym-ymin)/Ly) & - *sin(pi*(xm-xmin)/Lx) - END DO - ! wind to stress: - - stress_atmice_x = rhoair*cdwin*sqrt(u_wind**2+v_wind**2)*u_wind - stress_atmice_y = rhoair*cdwin*sqrt(u_wind**2+v_wind**2)*v_wind -end subroutine ice_update_forcing_test -! -!============================================================================== -! 
Simple initialization for tests for GM with the real geometry -! ============================================================================ -subroutine ini_global_ocean(mesh) - use MOD_MESH - use o_ARRAYS - use o_PARAM - use g_PARSUP - USE g_ROTATE_grid - ! - implicit none - integer :: n, nz - real(kind=WP) :: minlat,maxlat, lon, lat, val - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - tr_arr(:,:,1)=20.0_WP - tr_arr(:,:,2)=34.0_WP - - - call r2g(lon, maxlat, coord_nod2D(1,1), coord_nod2D(2,1)) - call r2g(lon, minlat, coord_nod2D(1,1), coord_nod2D(2,1)) - DO n=2,myDim_nod2D+eDim_nod2D - call r2g(lon, lat, coord_nod2D(1,n), coord_nod2D(2,n)) - maxlat=max(maxlat, lat) - minlat=min(minlat, lat) - END DO - - call MPI_AllREDUCE(minlat, val, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - minlat=val - call MPI_AllREDUCE(maxlat, val, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - maxlat=val - - ! Stratification - DO n=1, myDim_nod2D+eDim_nod2D - call r2g(lon, lat, coord_nod2D(1,n), coord_nod2D(2,n)) - DO nz=1, nlevels_nod2D(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)-(lat-minlat)/(maxlat-minlat)*2.0_WP - END DO - END DO -end subroutine ini_global_ocean -! ==================================================================== -! -!============================================================================== -! Zero the dynamicsl variables and forcing to allow for debugging of new implementations -! ============================================================================ -subroutine zero_dynamics - use g_parsup - use o_arrays - use g_comm_auto - use o_tracers - use g_forcing_arrays - implicit none - - water_flux =0._WP - real_salt_flux=0._WP - surf_relax_S =0._WP - heat_flux =0._WP - UV =0._WP - Wvel =0._WP -end subroutine zero_dynamics -! 
==================================================================== - diff --git a/src/oce_local.F90 b/src/oce_local.F90 index 3c4793e3e..eea0ec10b 100755 --- a/src/oce_local.F90 +++ b/src/oce_local.F90 @@ -1,23 +1,31 @@ module com_global2local_interface interface - subroutine com_global2local(mesh) + subroutine com_global2local(partit, mesh) use mod_mesh - type(t_mesh), intent(in) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module !============================================================================= -SUBROUTINE com_global2local(mesh) -USE g_PARSUP +SUBROUTINE com_global2local(partit, mesh) use MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE -type(t_mesh), intent(in) , target :: mesh +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit + INTEGER :: n, m INTEGER, ALLOCATABLE, DIMENSION(:) :: temp +#include "associate_part_def.h" #include "associate_mesh_ini.h" +#include "associate_part_ass.h" allocate(temp(max(nod2D, elem2D))) ! 
========= @@ -116,15 +124,17 @@ SUBROUTINE com_global2local(mesh) deallocate(temp) END SUBROUTINE com_global2local !============================================================================= -SUBROUTINE save_dist_mesh(mesh) +SUBROUTINE save_dist_mesh(partit, mesh) USE g_CONFIG USE MOD_MESH - USE o_ARRAYS - USE g_PARSUP + USE MOD_PARTIT + USE MOD_PARSUP + USE o_ARRAYS use com_global2local_interface IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit Integer n, m, q, q2, counter, fileID, nend, nini,ed(2) character*10 mype_string,npes_string character(MAX_PATH) file_name @@ -132,12 +142,12 @@ SUBROUTINE save_dist_mesh(mesh) integer, allocatable, dimension(:) :: temp, ncount integer n1, n2, flag, eledges(4) -#include "associate_mesh_ini.h" +#include "associate_part_def.h" +#include "associate_mesh_ini.h" +#include "associate_part_ass.h" -!!$ allocate(temp(nod2D)) ! serves for mapping -!!$ allocate(ncount(npes+1)) write(mype_string,'(i5.5)') mype - write(npes_string,"(I10)") npes + write(npes_string,"(I10)") npes dist_mesh_dir=trim(meshpath)//'dist_'//trim(ADJUSTL(npes_string))//'/' ! ============================== @@ -187,7 +197,8 @@ SUBROUTINE save_dist_mesh(mesh) write(fileID,*) myList_elem2D(1:myDim_elem2D), com_elem2D%rlist(1:eDim_elem2D), temp(1:eXDim_elem2D) deallocate(temp) - allocate(myList_edge2D(4*myDim_elem2D)) + allocate(partit%myList_edge2D(4*myDim_elem2D)) + myList_edge2D=>partit%myList_edge2D counter = 0 do n=1, edge2D do q=1,2 @@ -219,14 +230,14 @@ SUBROUTINE save_dist_mesh(mesh) write(fileID,*) myDim_edge2D write(fileID,*) eDim_edge2D write(fileID,*) myList_edge2D(1:myDim_edge2D +eDim_edge2D) - deallocate(myList_edge2D) + deallocate(partit%myList_edge2D) close(fileID) ! ========================= ! communication information ! ========================= - call com_global2local(mesh) ! Do not call this subroutine earlier, global numbering is needed! 
+ call com_global2local(partit, mesh) ! Do not call this subroutine earlier, global numbering is needed! file_name=trim(dist_mesh_dir)//'com_info'//trim(mype_string)//'.out' fileID=103+mype !skip unit range 100--102 open(fileID, file=file_name) @@ -239,13 +250,13 @@ SUBROUTINE save_dist_mesh(mesh) write(fileID,*) com_nod2D%sPE(1:com_nod2D%sPEnum) write(fileID,*) com_nod2D%sptr(1:com_nod2D%sPEnum+1) write(fileID,*) com_nod2D%slist - deallocate(myList_nod2D) + deallocate(partit%myList_nod2D) !!$ deallocate(com_nod2D%rPE) !!$ deallocate(com_nod2D%rptr) - deallocate(com_nod2D%rlist) + deallocate(partit%com_nod2D%rlist) !!$ deallocate(com_nod2D%sPE) !!$ deallocate(com_nod2D%sptr) - deallocate(com_nod2D%slist) + deallocate(partit%com_nod2D%slist) write(fileID,*) com_elem2D%rPEnum write(fileID,*) com_elem2D%rPE(1:com_elem2D%rPEnum) @@ -255,13 +266,13 @@ SUBROUTINE save_dist_mesh(mesh) write(fileID,*) com_elem2D%sPE(1:com_elem2D%sPEnum) write(fileID,*) com_elem2D%sptr(1:com_elem2D%sPEnum+1) write(fileID,*) com_elem2D%slist - deallocate(myList_elem2D) + deallocate(partit%myList_elem2D) !!$ deallocate(com_elem2D%rPE) !!$ deallocate(com_elem2D%rptr) - deallocate(com_elem2D%rlist) + deallocate(partit%com_elem2D%rlist) !!$ deallocate(com_elem2D%sPE) !!$ deallocate(com_elem2D%sptr) - deallocate(com_elem2D%slist) + deallocate(partit%com_elem2D%slist) write(fileID,*) com_elem2D_full%rPEnum write(fileID,*) com_elem2D_full%rPE(1:com_elem2D_full%rPEnum) @@ -273,10 +284,10 @@ SUBROUTINE save_dist_mesh(mesh) write(fileID,*) com_elem2D_full%slist !!$ deallocate(com_elem2D_full%rPE) !!$ deallocate(com_elem2D_full%rptr) - deallocate(com_elem2D_full%rlist) + deallocate(partit%com_elem2D_full%rlist) !!$ deallocate(com_elem2D_full%sPE) !!$ deallocate(com_elem2D_full%sptr) - deallocate(com_elem2D_full%slist) + deallocate(partit%com_elem2D_full%slist) close(fileID) ! ================================ ! 
mapping ( PE contiguous 2D numbering) diff --git a/src/oce_mesh.F90 b/src/oce_mesh.F90 index 0435381b5..59c74a906 100755 --- a/src/oce_mesh.F90 +++ b/src/oce_mesh.F90 @@ -1,56 +1,77 @@ module read_mesh_interface interface - subroutine read_mesh(mesh) + subroutine read_mesh(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module find_levels_interface interface - subroutine find_levels(mesh) + subroutine find_levels(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module find_levels_cavity_interface interface - subroutine find_levels_cavity(mesh) + subroutine find_levels_cavity(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module test_tri_interface interface - subroutine test_tri(mesh) + subroutine test_tri(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module load_edges_interface interface - subroutine load_edges(mesh) + subroutine load_edges(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module find_neighbors_interface interface - subroutine find_neighbors(mesh) + subroutine find_neighbors(partit, 
mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module mesh_areas_interface interface - subroutine mesh_areas(mesh) + subroutine mesh_areas(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module @@ -58,39 +79,59 @@ module elem_center_interface interface subroutine elem_center(elem, x, y, mesh) use mod_mesh - integer :: elem - real(kind=WP) :: x, y - type(t_mesh), intent(inout) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + integer :: elem + real(kind=WP), intent(inout) :: x, y + type(t_mesh), intent(inout), target :: mesh end subroutine end interface end module module edge_center_interface interface subroutine edge_center(n1, n2, x, y, mesh) - USE MOD_MESH + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP integer :: n1, n2 - real(kind=WP) :: x, y - type(t_mesh), intent(inout), target :: mesh + real(kind=WP), intent(inout):: x, y + type(t_mesh), intent(inout), target :: mesh end subroutine end interface end module module mesh_auxiliary_arrays_interface interface - subroutine mesh_auxiliary_arrays(mesh) + subroutine mesh_auxiliary_arrays(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module module find_levels_min_e2n_interface interface - subroutine find_levels_min_e2n(mesh) + subroutine find_levels_min_e2n(partit, mesh) use mod_mesh - type(t_mesh), intent(inout) , target :: mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: 
partit + end subroutine + end interface +end module +module check_total_volume_interface + interface + subroutine check_total_volume(partit, mesh) + use mod_mesh + USE MOD_PARTIT + USE MOD_PARSUP + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit end subroutine end interface end module - ! Driving routine. The distributed mesh information and mesh proper ! are read from files. @@ -98,9 +139,10 @@ subroutine find_levels_min_e2n(mesh) ! At the beginning of each routine I list arrays it initializes. ! Array sizes vary (sometimes we need only myDim, yet sometimes more)! ! S. Danilov, 2012 -SUBROUTINE mesh_setup(mesh) +SUBROUTINE mesh_setup(partit, mesh) USE MOD_MESH -USE g_parsup +USE MOD_PARTIT +USE MOD_PARSUP USE g_ROTATE_grid use read_mesh_interface use find_levels_interface @@ -111,39 +153,42 @@ SUBROUTINE mesh_setup(mesh) use find_levels_min_e2n_interface use find_neighbors_interface use mesh_areas_interface +use par_support_interfaces IMPLICIT NONE - - type(t_mesh), intent(inout) :: mesh + type(t_mesh), intent(inout) :: mesh + type(t_partit), intent(inout), target :: partit call set_mesh_transform_matrix !(rotated grid) - call read_mesh(mesh) - call set_par_support(mesh) - call find_levels(mesh) + call read_mesh(partit, mesh) + call init_mpi_types(partit, mesh) + call init_gatherLists(partit) + if(partit%mype==0) write(*,*) 'Communication arrays are set' + call test_tri(partit, mesh) + call load_edges(partit, mesh) + call find_neighbors(partit, mesh) - if (use_cavity) call find_levels_cavity(mesh) - - call test_tri(mesh) - call load_edges(mesh) - call find_neighbors(mesh) - call find_levels_min_e2n(mesh) - call mesh_areas(mesh) - call mesh_auxiliary_arrays(mesh) + call find_levels(partit, mesh) + if (use_cavity) call find_levels_cavity(partit, mesh) + + call find_levels_min_e2n(partit, mesh) + call mesh_areas(partit, mesh) + call mesh_auxiliary_arrays(partit, mesh) END SUBROUTINE mesh_setup 
!====================================================================== ! Reads distributed mesh ! The mesh will be read only by 0 proc and broadcasted to the others. -SUBROUTINE read_mesh(mesh) +SUBROUTINE read_mesh(partit, mesh) USE o_PARAM USE g_CONFIG USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP USE o_ARRAYS -USE g_PARSUP USE g_rotate_grid IMPLICIT NONE - -type(t_mesh), intent(inout), target :: mesh - +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit integer :: n, nn, k, m, fileID integer :: error_status !0/1=no error/error integer :: vert_nodes(1000) @@ -154,16 +199,20 @@ SUBROUTINE read_mesh(mesh) character(len=MAX_PATH) :: file_name character(len=MAX_PATH) :: dist_mesh_dir integer :: flag_wrongaux3d=0 - integer :: ierror ! return error code - integer, allocatable, dimension(:) :: mapping - integer, allocatable, dimension(:,:) :: ibuff + integer :: ierror ! return error code + logical :: file_exist + integer, allocatable, dimension(:) :: mapping + integer, allocatable, dimension(:,:) :: ibuff real(kind=WP), allocatable, dimension(:,:) :: rbuff - integer, allocatable, dimension(:,:) :: auxbuff ! will be used for reading aux3d.out - integer fileunit, iostat - character(32) mesh_checksum + integer, allocatable, dimension(:,:) :: auxbuff ! will be used for reading aux3d.out + integer :: fileunit, iostat + character(32) :: mesh_checksum -!NR Cannot include the pointers before the targets are allocated... 
-!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +mype=>partit%mype +npes=>partit%npes +MPI_COMM_FESOM=>partit%MPI_COMM_FESOM !mesh related files will be read in chunks of chunk_size chunk_size=100000 @@ -173,14 +222,12 @@ SUBROUTINE read_mesh(mesh) !============================== allocate(mapping(chunk_size)) allocate(ibuff(chunk_size,4), rbuff(chunk_size,3)) - mapping=0 !============================== t0=MPI_Wtime() write(mype_string,'(i5.5)') mype - write(npes_string,"(I10)") npes + write(npes_string,"(I10)") npes dist_mesh_dir=trim(meshpath)//'dist_'//trim(ADJUSTL(npes_string))//'/' - !======================= ! rank partitioning vector ! will be read by 0 proc @@ -189,7 +236,8 @@ SUBROUTINE read_mesh(mesh) file_name=trim(dist_mesh_dir)//'rpart.out' fileID=10 open(fileID, file=trim(file_name)) - allocate(part(npes+1)) + allocate(partit%part(npes+1)) + part=>partit%part read(fileID,*) n error_status=0 if (n/=npes) error_status=1 !set the error status for consistency in rpart @@ -205,16 +253,16 @@ SUBROUTINE read_mesh(mesh) if (error_status/=0) then write(*,*) n write(*,*) 'error: NPES does not coincide with that of the mesh' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) STOP end if ! broadcasting partitioning vector to the other procs if (mype/=0) then - allocate(part(npes+1)) + allocate(partit%part(npes+1)) + part=>partit%part end if call MPI_BCast(part, npes+1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) if (mype==0) write(*,*) mype,'rpart is read' - !=========================== ! Lists of nodes and elements in global indexing. ! 
every proc reads its file @@ -226,23 +274,25 @@ SUBROUTINE read_mesh(mesh) open(fileID, file=trim(file_name)) read(fileID,*) n - read(fileID,*) myDim_nod2D - read(fileID,*) eDim_nod2D - allocate(myList_nod2D(myDim_nod2D+eDim_nod2D)) - read(fileID,*) myList_nod2D + read(fileID,*) partit%myDim_nod2D + read(fileID,*) partit%eDim_nod2D + allocate(partit%myList_nod2D(partit%myDim_nod2D+partit%eDim_nod2D)) + read(fileID,*) partit%myList_nod2D - read(fileID,*) myDim_elem2D - read(fileID,*) eDim_elem2D - read(fileID,*) eXDim_elem2D - allocate(myList_elem2D(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) - read(fileID,*) myList_elem2D + read(fileID,*) partit%myDim_elem2D + read(fileID,*) partit%eDim_elem2D + read(fileID,*) partit%eXDim_elem2D + allocate(partit%myList_elem2D(partit%myDim_elem2D+partit%eDim_elem2D+partit%eXDim_elem2D)) + read(fileID,*) partit%myList_elem2D - read(fileID,*) myDim_edge2D - read(fileID,*) eDim_edge2D - allocate(myList_edge2D(myDim_edge2D+eDim_edge2D)) - read(fileID,*) myList_edge2D ! m + read(fileID,*) partit%myDim_edge2D + read(fileID,*) partit%eDim_edge2D + allocate(partit%myList_edge2D(partit%myDim_edge2D+partit%eDim_edge2D)) + read(fileID,*) partit%myList_edge2D ! m close(fileID) +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" if (mype==0) write(*,*) 'myLists are read' !============================== @@ -264,7 +314,7 @@ SUBROUTINE read_mesh(mesh) if (error_status/=0) then write(*,*) n write(*,*) 'error: nod2D/=part(npes+1)-1' - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) STOP end if @@ -363,7 +413,7 @@ SUBROUTINE read_mesh(mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) !___________________________________________________________________________ ! check if rotation needs to be applied to an unrotated mesh elseif ((mype==0) .and. (.not. force_rotation) .and. 
(flag_checkmustrot==1) .and. (.not. toy_ocean)) then @@ -384,7 +434,7 @@ SUBROUTINE read_mesh(mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if @@ -467,67 +517,226 @@ SUBROUTINE read_mesh(mesh) ! read depth data !============================== ! 0 proc reads header of aux3d.out and broadcasts it between procs - allocate(mesh%depth(myDim_nod2D+eDim_nod2D)) - if (mype==0) then !open the file for reading on 0 proc + ! + ! + !______________________________________________________________________________ + ! read depth from aux3d.out + if (trim(use_depthfile)=='aux3d') then + ! check if aux3d.out file does exist + file_exist=.False. file_name=trim(meshpath)//'aux3d.out' - open(fileID, file=file_name) - read(fileID,*) mesh%nl ! the number of levels - end if - call MPI_BCast(mesh%nl, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) - if (mesh%nl < 3) then - write(*,*) '!!!Number of levels is less than 3, model will stop!!!' - call par_ex - stop - end if - allocate(mesh%zbar(mesh%nl)) ! allocate the array for storing the standard depths - if (mype==0) read(fileID,*) mesh%zbar - call MPI_BCast(mesh%zbar, mesh%nl, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - if(mesh%zbar(2)>0) mesh%zbar=-mesh%zbar ! zbar is negative - allocate(mesh%Z(mesh%nl-1)) - mesh%Z=mesh%zbar(1:mesh%nl-1)+mesh%zbar(2:mesh%nl) ! mid-depths of cells - mesh%Z=0.5_WP*mesh%Z + inquire(file=trim(file_name),exist=file_exist) + !___________________________________________________________________________ + if (file_exist) then + if (mype==0) then !open the file for reading on 0 proc + open(fileID, file=file_name) + read(fileID,*) mesh%nl ! the number of levels + end if + call MPI_BCast(mesh%nl, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + if (mesh%nl < 3) then + write(*,*) '!!!Number of levels is less than 3, model will stop!!!' 
+ call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + allocate(mesh%zbar(mesh%nl)) ! allocate the array for storing the standard depths + if (mype==0) read(fileID,*) mesh%zbar + call MPI_BCast(mesh%zbar, mesh%nl, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + if(mesh%zbar(2)>0) mesh%zbar=-mesh%zbar ! zbar is negative + allocate(mesh%Z(mesh%nl-1)) + mesh%Z=mesh%zbar(1:mesh%nl-1)+mesh%zbar(2:mesh%nl) ! mid-depths of cells + mesh%Z=0.5_WP*mesh%Z + !___________________________________________________________________________ + else + if (mype==0) then + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use aux3d.out file to define your depth, but ' + write(*,*) ' the file seems not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" ' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + end if + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) + end if + + !______________________________________________________________________________ + ! read depth from depth@node.out or depth@elem.out + elseif (trim(use_depthfile)=='depth@') then + !___________________________________________________________________________ + ! load file depth_zlev.out --> contains number of model levels and full depth + ! levels + file_exist=.False. + file_name=trim(meshpath)//'depth_zlev.out' + inquire(file=trim(file_name),exist=file_exist) + if (file_exist) then + if (mype==0) then !open the file for reading on 0 proc + open(fileID, file=file_name) + read(fileID,*) mesh%nl ! the number of levels + end if + call MPI_BCast(mesh%nl, 1, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + if (mesh%nl < 3) then + write(*,*) '!!!Number of levels is less than 3, model will stop!!!' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + allocate(mesh%zbar(mesh%nl)) ! 
allocate the array for storing the standard depths + if (mype==0) read(fileID,*) mesh%zbar + call MPI_BCast(mesh%zbar, mesh%nl, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + if(mesh%zbar(2)>0) mesh%zbar=-mesh%zbar ! zbar is negative + allocate(mesh%Z(mesh%nl-1)) + mesh%Z=mesh%zbar(1:mesh%nl-1)+mesh%zbar(2:mesh%nl) ! mid-depths of cells + mesh%Z=0.5_WP*mesh%Z + if (mype==0) close(fileID) + !___________________________________________________________________________ + else + if (mype==0) then + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use depth@elem.out or depth@node.out file, therefore' + write(*,*) ' you also need the file depth_zlev.out which contains the model ' + write(*,*) ' number of layers and the depth of model levels. This file seems ' + write(*,*) ' not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" and your meshfolder' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) + end if + end if + + !___________________________________________________________________________ + ! load file depth@elem.out or depth@node.out contains topography either at + ! nodes or elements + file_exist=.False. 
+ if (use_depthonelem) then + file_name=trim(meshpath)//'depth@elem.out' + else + file_name=trim(meshpath)//'depth@node.out' + end if + inquire(file=trim(file_name),exist=file_exist) + if (file_exist) then + if (mype==0) open(fileID, file=file_name) + else + if (mype==0) then + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: You want to use depth@elem.out or depth@node.out file to ' + write(*,*) ' define your depth, but the file seems not to exist' + write(*,*) ' --> check in namelist.config, the flag use_depthfile must be' + write(*,*) ' use_depthfile= "aux3d" or "depth@" and your meshfolder ' + write(*,*) ' --> model stops here' + write(*,*) '____________________________________________________________________' + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) + end if + end if + end if ! 0 proc reads the data in chunks and distributes it between other procs - mesh_check=0 - do nchunk=0, (mesh%nod2D-1)/chunk_size - mapping(1:chunk_size)=0 - do n=1, myDim_nod2D+eDim_nod2D - ipos=(myList_nod2D(n)-1)/chunk_size - if (ipos==nchunk) then - iofs=myList_nod2D(n)-nchunk*chunk_size - mapping(iofs)=n - end if - end do - - k=min(chunk_size, mesh%nod2D-nchunk*chunk_size) - if (mype==0) then - do n=1, k - read(fileID,*) rbuff(n,1) - end do - ! check here if aux3d.out contains depth levels (FESOM2.0) or 3d indices - ! (FESOM1.4) like that check if the proper mesh is loaded. 11000.0 is here - ! the maximum depth on earth in marianen trench - if ( flag_wrongaux3d==0 .and. any(abs(rbuff(1:k,1))>11000.0_WP) ) flag_wrongaux3d=1 + !______________________________________________________________________________ + ! bottom topography is defined on elements + if (use_depthonelem) then + !___________________________________________________________________________ + ! 
allocate mesh%depth at elements + allocate(mesh%depth(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) + + !___________________________________________________________________________ + mesh_check=0 + do nchunk=0, (mesh%elem2D-1)/chunk_size + mapping(1:chunk_size)=0 + do n=1, myDim_elem2D+eDim_elem2D+eXDim_elem2D + ipos=(myList_elem2D(n)-1)/chunk_size + if (ipos==nchunk) then + iofs=myList_elem2D(n)-nchunk*chunk_size + mapping(iofs)=n + end if + end do + + k=min(chunk_size, mesh%elem2D-nchunk*chunk_size) + if (mype==0) then + do n=1, k + read(fileID,*) rbuff(n,1) + end do + ! check here if aux3d.out contains depth levels (FESOM2.0) or 3d indices + ! (FESOM1.4) like that check if the proper mesh is loaded. 11000.0 is here + ! the maximum depth on earth in marianen trench + if ( flag_wrongaux3d==0 .and. any(abs(rbuff(1:k,1))>11000.0_WP) ) flag_wrongaux3d=1 + end if + call MPI_BCast(rbuff(1:k,1), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + + do n=1, k + x=rbuff(n,1) + if (x>0) x=-x !deps must be negative! + if (x>mesh%zbar(thers_zbar_lev)) x=mesh%zbar(thers_zbar_lev) !threshold for depth + if (mapping(n)>0) then + mesh_check=mesh_check+1 + mesh%depth(mapping(n))=x + end if + end do ! --> do n=1, k + end do ! --> do nchunk=0, (mesh%elem2D-1)/chunk_size + + !___________________________________________________________________________ + if (mype==0) close(fileID) + + !___________________________________________________________________________ + if (mesh_check/=myDim_elem2D+eDim_elem2D+eXDim_elem2D) then + write(*,*) 'ERROR while reading aux3d.out on mype=', mype + write(*,*) mesh_check, ' values have been read in according to partitioning' + write(*,*) 'it does not equal to myDim_elem2D+eDim_elem2D+eXDim_elem2D = ', myDim_elem2D+eDim_elem2D+eXDim_elem2D end if - call MPI_BCast(rbuff(1:k,1), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) - - do n=1, k - x=rbuff(n,1) - if (x>0) x=-x !deps must be negative! 
- if (x>mesh%zbar(5)) x=mesh%zbar(5) !threshold for depth - if (mapping(n)>0) then - mesh_check=mesh_check+1 - mesh%depth(mapping(n))=x + + !______________________________________________________________________________ + ! bottom topography is defined on nodes + else + !___________________________________________________________________________ + ! allocate mesh%depth at nodes + allocate(mesh%depth(myDim_nod2D+eDim_nod2D)) + + !___________________________________________________________________________ + ! fill mesh%depth from file with neighborhood information + mesh_check=0 + do nchunk=0, (mesh%nod2D-1)/chunk_size + mapping(1:chunk_size)=0 + do n=1, myDim_nod2D+eDim_nod2D + ipos=(myList_nod2D(n)-1)/chunk_size + if (ipos==nchunk) then + iofs=myList_nod2D(n)-nchunk*chunk_size + mapping(iofs)=n + end if + end do + + k=min(chunk_size, mesh%nod2D-nchunk*chunk_size) + if (mype==0) then + do n=1, k + read(fileID,*) rbuff(n,1) + end do + ! check here if aux3d.out contains depth levels (FESOM2.0) or 3d indices + ! (FESOM1.4) like that check if the proper mesh is loaded. 11000.0 is here + ! the maximum depth on earth in marianen trench + if ( flag_wrongaux3d==0 .and. any(abs(rbuff(1:k,1))>11000.0_WP) ) flag_wrongaux3d=1 end if - end do - end do - - if (mype==0) close(fileID) - if (mesh_check/=myDim_nod2D+eDim_nod2D) then - write(*,*) 'ERROR while reading aux3d.out on mype=', mype - write(*,*) mesh_check, ' values have been read in according to partitioning' - write(*,*) 'it does not equal to myDim_nod2D+eDim_nod2D = ', myDim_nod2D+eDim_nod2D - end if + call MPI_BCast(rbuff(1:k,1), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + + do n=1, k + x=rbuff(n,1) + if (x>0) x=-x !deps must be negative! + if (x>mesh%zbar(thers_zbar_lev)) x=mesh%zbar(thers_zbar_lev) !threshold for depth + if (mapping(n)>0) then + mesh_check=mesh_check+1 + mesh%depth(mapping(n))=x + end if + end do ! --> do n=1, k + end do ! 
--> do nchunk=0, (mesh%nod2D-1)/chunk_size + + !___________________________________________________________________________ + if (mype==0) close(fileID) + + !___________________________________________________________________________ + if (mesh_check/=myDim_nod2D+eDim_nod2D) then + write(*,*) 'ERROR while reading aux3d.out on mype=', mype + write(*,*) mesh_check, ' values have been read in according to partitioning' + write(*,*) 'it does not equal to myDim_nod2D+eDim_nod2D = ', myDim_nod2D+eDim_nod2D + end if + end if ! --> if (use_depthonelem) then + + !_______________________________________________________________________________ ! check if the mesh structure of FESOM2.0 and of FESOM1.4 is loaded if ((mype==0) .and. (flag_wrongaux3d==1)) then @@ -547,7 +756,7 @@ SUBROUTINE read_mesh(mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex(0) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 0) end if ! ============================== @@ -567,7 +776,8 @@ SUBROUTINE read_mesh(mesh) read(fileID,*) com_nod2D%rPE(1:com_nod2D%rPEnum) !!$ ALLOCATE(com_nod2D%rptr(com_nod2D%rPEnum+1)) read(fileID,*) com_nod2D%rptr(1:com_nod2D%rPEnum+1) - ALLOCATE(com_nod2D%rlist(eDim_nod2D)) + ALLOCATE(partit%com_nod2D%rlist(eDim_nod2D)) + read(fileID,*) com_nod2D%rlist read(fileID,*) com_nod2D%sPEnum @@ -654,20 +864,8 @@ SUBROUTINE read_mesh(mesh) deallocate(rbuff, ibuff) deallocate(mapping) - ! try to calculate checksum and distribute it to every process - ! the shell command is probably not very portable and might fail, in which case we just do not have a checksum - mesh%representative_checksum = ' ' ! we use md5 which is 32 chars long, so set default value to the same length - if(mype==0) then - call execute_command_line("md5sum "//trim(MeshPath)//"nod2d.out | cut -d ' ' -f 1 > "//trim(ResultPath)//"mesh_checksum") - ! we can not check if execute_command_line succeeded (e.g. 
with cmdstat), as the pipe will swallow any error from the initial command - ! so we have to thoroughly check if the file exists and if it contains our checksum - open(newunit=fileunit, file=trim(ResultPath)//"mesh_checksum", action="READ", iostat=iostat) - if(iostat==0) read(fileunit, *, iostat=iostat) mesh_checksum - close(fileunit) - if(iostat==0 .and. len_trim(mesh_checksum)==32) mesh%representative_checksum = mesh_checksum - end if - call MPI_BCAST(mesh%representative_checksum, len(mesh%representative_checksum), MPI_CHAR, 0, MPI_COMM_FESOM, MPIerr) - mesh%representative_checksum = trim(mesh%representative_checksum) ! if we did not get a checksum, the string is empty +! no checksum for now, execute_command_line is failing too often. if you think it is important, please drop me a line and I will try to revive it: jan.hegewald@awi.de +mesh%representative_checksum = '' CALL MPI_BARRIER(MPI_COMM_FESOM, MPIerr) t1=MPI_Wtime() @@ -685,15 +883,17 @@ END subroutine read_mesh ! load fesom2.0 mesh files: nlvls.out and elvls.out that are created during the ! partitioning !_______________________________________________________________________________ -subroutine find_levels(mesh) +subroutine find_levels(partit, mesh) use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM - use g_PARSUP use g_config ! implicit none ! - type(t_mesh), intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit character(len=MAX_PATH) :: file_name integer :: ierror ! MPI return error code integer :: k, n, fileID @@ -701,10 +901,8 @@ subroutine find_levels(mesh) integer, allocatable, dimension(:) :: mapping integer, allocatable, dimension(:) :: ibuff real(kind=WP) :: t0, t1 - -!NR Cannot include the pointers before the targets are allocated... 
-!NR #include "associate_mesh.h" - +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() !___________________________________________________________________________ allocate(mesh%nlevels(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) @@ -887,33 +1085,36 @@ end subroutine find_levels ! cavity_elvls.out that are created during the partitioning when namelist.config flag ! use_cavity=.True. !_______________________________________________________________________________ -subroutine find_levels_cavity(mesh) +subroutine find_levels_cavity(partit, mesh) use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM - use g_PARSUP use g_config ! implicit none ! - type(t_mesh), intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit character(MAX_PATH) :: file_name integer :: ierror ! MPI return error code integer :: k, n, fileID integer :: nchunk, chunk_size, ipos, iofs, mesh_check integer, allocatable, dimension(:) :: mapping integer, allocatable, dimension(:) :: ibuff + real(kind=WP), allocatable, dimension(:) :: rbuff real(kind=WP) :: t0, t1 logical :: file_exist=.False. - integer :: elem, elnodes(3), ule, uln(3) -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" + integer :: elem, elnodes(3), ule, uln(3), node, j, nz + integer, allocatable, dimension(:) :: numelemtonode +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() !___________________________________________________________________________ ! allocate arrays, reset pointers !!PS allocate(mesh%cavity_flag_e(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) !!PS allocate(mesh%cavity_flag_n(myDim_nod2D+eDim_nod2D)) - allocate(mesh%cavity_depth(myDim_nod2D+eDim_nod2D)) !___________________________________________________________________________ ! 
mesh related files will be read in chunks of chunk_size @@ -944,7 +1145,7 @@ subroutine find_levels_cavity(mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end if end if @@ -1032,7 +1233,7 @@ subroutine find_levels_cavity(mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end if end if @@ -1096,6 +1297,8 @@ subroutine find_levels_cavity(mesh) print *, achar(27)//'[0m' end if + deallocate(ibuff) + !___________________________________________________________________________ ! Part III: computing cavity flag at nodes and elements !!PS mesh%cavity_flag_e = 0 @@ -1175,7 +1378,11 @@ subroutine find_levels_cavity(mesh) !___________________________________________________________________________ ! Part IV: reading cavity depth at nodes if (mype==0) then - file_name=trim(meshpath)//'cavity_depth.out' + if (use_cavityonelem) then + file_name = trim(meshpath)//'cavity_depth@elem.out' + else + file_name = trim(meshpath)//'cavity_depth@node.out' + end if file_exist=.False. inquire(file=trim(file_name),exist=file_exist) if (file_exist) then @@ -1191,73 +1398,146 @@ subroutine find_levels_cavity(mesh) write(*,*) '____________________________________________________________________' print *, achar(27)//'[0m' write(*,*) - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end if end if - ! 0 proc reads the data in chunks and distributes it between other procs - mesh_check=0 - do nchunk=0, (mesh%nod2D-1)/chunk_size + !___________________________________________________________________________ + ! 
cavity topography is defined on elements + if (use_cavityonelem) then !_______________________________________________________________________ - !create the mapping for the current chunk - mapping(1:chunk_size)=0 - do n=1, myDim_nod2D+eDim_nod2D - ! myList_nod2D(n) contains global vertice index of the local - ! vertice on that CPU - ! ipos is integer, (myList_nod2D(n)-1)/chunk_size always rounds - ! off to integer values - ! --> ipos is an index to which chunk a global vertice on a local CPU - ! belongs - ipos=(myList_nod2D(n)-1)/chunk_size + ! allocate mesh%depth at elements + allocate(mesh%cavity_depth(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) + + !_______________________________________________________________________ + ! fill mesh%cavity_depth from file with neighborhood information + mesh_check=0 + do nchunk=0, (mesh%elem2D-1)/chunk_size + mapping(1:chunk_size)=0 + do n=1, myDim_elem2D+eDim_elem2D+eXDim_elem2D + ipos=(myList_elem2D(n)-1)/chunk_size + if (ipos==nchunk) then + iofs=myList_elem2D(n)-nchunk*chunk_size + mapping(iofs)=n + end if + end do - ! if global vertice chunk index (ipos) lies within the actual chunk - if (ipos==nchunk) then - iofs=myList_nod2D(n)-nchunk*chunk_size - ! connect chunk reduced (iofs) global vertice index with local - ! vertice index n --> mapping(iofs)=n - mapping(iofs)=n + !___________________________________________________________________ + ! read the chunk piece into the buffer --> done only by one + ! CPU (mype==0) + k=min(chunk_size, mesh%elem2D-nchunk*chunk_size) + if (mype==0) then + do n=1, k + read(fileID,*) rbuff(n) + end do end if - end do + + !___________________________________________________________________ + ! broadcast chunk buffer to all other CPUs (k...size of buffer) + call MPI_BCast(rbuff(1:k), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + + !___________________________________________________________________ + ! 
fill the local arrays + do n=1, k + if (mapping(n)>0) then + mesh_check=mesh_check+1 + mesh%cavity_depth(mapping(n))=rbuff(n) + end if + end do + end do ! --> do nchunk=0, (mesh%elem2D-1)/chunk_size !_______________________________________________________________________ - ! read the chunk piece into the buffer --> done only by one CPU (mype==0) - ! k ... is actual chunk size, considers also possible change in chunk size - ! at the end i.e nod2d=130000, nchunk_0 = 100000, nchunk_1=30000 - k=min(chunk_size, mesh%nod2D-nchunk*chunk_size) - if (mype==0) then - do n=1, k - read(fileID,*) ibuff(n) - end do + if (mype==0) close(fileID) + + !_______________________________________________________________________ + if (mesh_check/=myDim_elem2D+eDim_elem2D+eXDim_elem2D) then + write(*,*) + print *, achar(27)//'[33m' + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: while reading cavity_depth.out on mype=', mype + write(*,*) ' ',mesh_check, ' values have been read in according to partitioning' + write(*,*) ' it does not equal to myDim_elem2D+eDim_elem2D+eXDim_elem2D = ', myDim_elem2D+eDim_elem2D+eXDim_elem2D + write(*,*) '____________________________________________________________________' + print *, achar(27)//'[0m' end if + !___________________________________________________________________________ + ! cavity topography is defined on nodes + else !_______________________________________________________________________ - ! broadcast chunk buffer to all other CPUs (k...size of buffer) - call MPI_BCast(ibuff(1:k), k, MPI_INTEGER, 0, MPI_COMM_FESOM, ierror) + ! allocate mesh%depth at nodes + allocate(mesh%cavity_depth(myDim_nod2D+eDim_nod2D)) !_______________________________________________________________________ - ! fill the local arrays - do n=1, k - if (mapping(n)>0) then - mesh_check=mesh_check+1 - mesh%cavity_depth(mapping(n))=ibuff(n) + ! fill mesh%cavity_depth from file with neighborhood information + ! 
0 proc reads the data in chunks and distributes it between other procs + mesh_check=0 + do nchunk=0, (mesh%nod2D-1)/chunk_size + !___________________________________________________________________ + !create the mapping for the current chunk + mapping(1:chunk_size)=0 + do n=1, myDim_nod2D+eDim_nod2D + ! myList_nod2D(n) contains global vertice index of the local + ! vertice on that CPU + ! ipos is integer, (myList_nod2D(n)-1)/chunk_size always rounds + ! off to integer values + ! --> ipos is an index to which chunk a global vertice on a local CPU + ! belongs + ipos=(myList_nod2D(n)-1)/chunk_size + + ! if global vertice chunk index (ipos) lies within the actual chunk + if (ipos==nchunk) then + iofs=myList_nod2D(n)-nchunk*chunk_size + ! connect chunk reduced (iofs) global vertice index with local + ! vertice index n --> mapping(iofs)=n + mapping(iofs)=n + end if + end do + + !_______________________________________________________________________ + ! read the chunk piece into the buffer --> done only by one CPU (mype==0) + ! k ... is actual chunk size, considers also possible change in chunk size + ! at the end i.e nod2d=130000, nchunk_0 = 100000, nchunk_1=30000 + k=min(chunk_size, mesh%nod2D-nchunk*chunk_size) + if (mype==0) then + do n=1, k + read(fileID,*) rbuff(n) + end do end if - end do - end do ! 
--> do nchunk=0, (mesh%nod2D-1)/chunk_size - if (mype==0) close(fileID) - if (mesh_check/=myDim_nod2D+eDim_nod2D) then - write(*,*) - print *, achar(27)//'[33m' - write(*,*) '____________________________________________________________________' - write(*,*) ' ERROR: while reading cavity_depth.out on mype=', mype - write(*,*) ' ',mesh_check, ' values have been read in according to partitioning' - write(*,*) ' it does not equal to myDim_nod2D+eDim_nod2D = ', myDim_nod2D+eDim_nod2D - write(*,*) '____________________________________________________________________' - print *, achar(27)//'[0m' - end if + + !___________________________________________________________________ + ! broadcast chunk buffer to all other CPUs (k...size of buffer) + call MPI_BCast(rbuff(1:k), k, MPI_DOUBLE_PRECISION, 0, MPI_COMM_FESOM, ierror) + + !___________________________________________________________________ + ! fill the local arrays + do n=1, k + if (mapping(n)>0) then + mesh_check=mesh_check+1 + mesh%cavity_depth(mapping(n))=rbuff(n) + end if + end do + end do ! --> do nchunk=0, (mesh%nod2D-1)/chunk_size + + !_______________________________________________________________________ + if (mype==0) close(fileID) + + !_______________________________________________________________________ + if (mesh_check/=myDim_nod2D+eDim_nod2D) then + write(*,*) + print *, achar(27)//'[33m' + write(*,*) '____________________________________________________________________' + write(*,*) ' ERROR: while reading cavity_depth.out on mype=', mype + write(*,*) ' ',mesh_check, ' values have been read in according to partitioning' + write(*,*) ' it does not equal to myDim_nod2D+eDim_nod2D = ', myDim_nod2D+eDim_nod2D + write(*,*) '____________________________________________________________________' + print *, achar(27)//'[0m' + end if + end if ! --> if (use_cavityonelem) then !___________________________________________________________________________ ! 
deallocate mapping and buffer array - deallocate(ibuff) + deallocate(rbuff) deallocate(mapping) !___________________________________________________________________________ @@ -1283,6 +1563,32 @@ subroutine find_levels_cavity(mesh) end if end do + + !___________________________________________________________________________ + allocate(numelemtonode(mesh%nl)) + do node=1, myDim_nod2D+eDim_nod2D + numelemtonode=0 + !_______________________________________________________________________ + do j=1,mesh%nod_in_elem2D_num(node) + elem=mesh%nod_in_elem2D(j,node) + do nz=mesh%ulevels(elem),mesh%nlevels(elem)-1 + numelemtonode(nz) = numelemtonode(nz) + 1 + end do + end do + + !_______________________________________________________________________ + ! check how many triangle elements contribute to every vertice in every layer + ! every vertice in every layer should be connected to at least two triangle + ! elements ! + do nz=mesh%ulevels_nod2D(node),mesh%nlevels_nod2D(node)-1 + if (numelemtonode(nz)== 1) then + write(*,*) 'ERROR A: found vertice with just one triangle:', mype, node, nz + end if + end do + + end do + deallocate(numelemtonode) + end subroutine find_levels_cavity ! ! @@ -1291,21 +1597,23 @@ end subroutine find_levels_cavity ! cavity_elvls.out that are created during the partitioning when namelist.config flag ! use_cavity=.True. !_______________________________________________________________________________ -subroutine find_levels_min_e2n(mesh) +subroutine find_levels_min_e2n(partit, mesh) use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP use o_PARAM - use g_PARSUP use g_config use g_comm_auto ! implicit none ! - type(t_mesh), intent(inout), target :: mesh + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit integer :: node, k real(kind=WP) :: t0, t1 -!NR Cannot include the pointers before the targets are allocated... 
-!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() !___________________________________________________________________________ @@ -1317,8 +1625,8 @@ subroutine find_levels_min_e2n(mesh) mesh%nlevels_nod2D_min(node)=minval(mesh%nlevels(mesh%nod_in_elem2D(1:k,node))) mesh%ulevels_nod2D_max(node)=maxval(mesh%ulevels(mesh%nod_in_elem2D(1:k,node))) end do - call exchange_nod(mesh%nlevels_nod2D_min) - call exchange_nod(mesh%ulevels_nod2D_max) + call exchange_nod(mesh%nlevels_nod2D_min, partit) + call exchange_nod(mesh%ulevels_nod2D_max, partit) !___________________________________________________________________________ t1=MPI_Wtime() @@ -1332,20 +1640,26 @@ end subroutine find_levels_min_e2n ! ! !=========================================================================== -SUBROUTINE test_tri(mesh) +SUBROUTINE test_tri(partit, mesh) USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP USE o_PARAM -USE g_PARSUP USE g_CONFIG use g_rotate_grid IMPLICIT NONE ! Check the order of nodes in triangles; correct it if necessary to make ! 
it same sense (clockwise) -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit real(kind=WP) :: a(2), b(2), c(2), r integer :: n, nx, elnodes(3) real(kind=WP) :: t0, t1 +#include "associate_part_def.h" +#include "associate_part_ass.h" + + t0=MPI_Wtime() DO n=1, myDim_elem2D @@ -1378,13 +1692,15 @@ SUBROUTINE test_tri(mesh) END SUBROUTINE test_tri !========================================================================= -SUBROUTINE load_edges(mesh) +SUBROUTINE load_edges(partit, mesh) USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP USE o_PARAM -USE g_PARSUP USE g_CONFIG IMPLICIT NONE -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit character(MAX_PATH) :: file_name integer :: counter, n, m, nn, k, q, fileID integer :: elems(2), elem @@ -1396,8 +1712,8 @@ SUBROUTINE load_edges(mesh) integer, allocatable, dimension(:,:) :: ibuff integer :: ierror ! return error code -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() @@ -1609,7 +1925,7 @@ SUBROUTINE load_edges(mesh) END SUBROUTINE load_edges !=========================================================================== -SUBROUTINE find_neighbors(mesh) +SUBROUTINE find_neighbors(partit, mesh) ! For each element three its element neighbors are found ! For each node the elements containing it are found ! 
Allocated are: @@ -1620,20 +1936,22 @@ SUBROUTINE find_neighbors(mesh) USE o_PARAM USE MOD_MESH -USE g_PARSUP +USE MOD_PARTIT +USE MOD_PARSUP USE g_ROTATE_grid use g_comm_auto use elem_center_interface implicit none -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit integer :: elem, eledges(3), elem1, j, n, node, enum,elems(3),count1,count2,exit_flag,i,nz integer, allocatable :: temp_i(:) -integer :: mymax(npes), rmax(npes) +integer :: mymax(partit%npes), rmax(partit%npes) real(kind=WP) :: gx,gy,rx,ry real(kind=WP) :: t0, t1 -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_part_ass.h" CALL MPI_BARRIER(MPI_COMM_FESOM, MPIerr) t0=MPI_Wtime() @@ -1689,7 +2007,7 @@ SUBROUTINE find_neighbors(mesh) end do end do - call exchange_nod(mesh%nod_in_elem2D_num) + call exchange_nod(mesh%nod_in_elem2D_num, partit) allocate (temp_i(myDim_nod2D+eDim_nod2D)) temp_i=0 DO n=1, maxval(rmax) @@ -1697,7 +2015,7 @@ SUBROUTINE find_neighbors(mesh) do j=1,myDim_nod2D if (mesh%nod_in_elem2D(n,j)>0) temp_i(j)=myList_elem2D(mesh%nod_in_elem2D(n,j)) enddo - call exchange_nod(temp_i) + call exchange_nod(temp_i, partit) mesh%nod_in_elem2D(n,:)=temp_i END DO deallocate(temp_i) @@ -1727,7 +2045,7 @@ SUBROUTINE find_neighbors(mesh) END DO if (elem1<2) then write(*,*) 'Insufficient number of neighbors ', myList_elem2D(elem) - call par_ex(1) + call par_ex(partit%MPI_COMM_FESOM, partit%mype, 1) STOP end if END DO @@ -1764,9 +2082,10 @@ subroutine edge_center(n1, n2, x, y, mesh) USE o_PARAM USE g_CONFIG implicit none -integer :: n1, n2 ! nodes of the edge -real(kind=WP) :: x, y, a(2), b(2) -type(t_mesh), intent(inout), target :: mesh +integer :: n1, n2 ! 
nodes of the edge +real(kind=WP), intent(inout) :: x, y +type(t_mesh), intent(inout), target :: mesh +real(kind=WP) :: a(2), b(2) a=mesh%coord_nod2D(:,n1) b=mesh%coord_nod2D(:,n2) @@ -1782,13 +2101,13 @@ subroutine elem_center(elem, x, y, mesh) USE o_PARAM USE g_CONFIG implicit none -integer :: elem, elnodes(3), k -real(kind=WP) :: x, y, ax(3), amin - -type(t_mesh), intent(inout), target :: mesh +real(kind=WP), intent(inout) :: x, y +type(t_mesh), intent(inout), target :: mesh +integer :: elem, elnodes(3), k +real(kind=WP) :: ax(3), amin elnodes=mesh%elem2D_nodes(:,elem) - ax=mesh%coord_nod2D(1, elnodes) + ax=mesh%coord_nod2D(1, elnodes) amin=minval(ax) DO k=1,3 if(ax(k)-amin>=cyclic_length/2.0_WP) ax(k)=ax(k)-cyclic_length @@ -1799,143 +2118,267 @@ subroutine elem_center(elem, x, y, mesh) end subroutine elem_center !========================================================================== -SUBROUTINE mesh_areas(mesh) -USE MOD_MESH -USE o_PARAM -USE g_PARSUP -USE g_ROTATE_GRID -use g_comm_auto -IMPLICIT NONE -! Collects auxilliary information on the mesh -! Allocated and filled in are: -! elem_area(myDim_elem2D) -! area(nl, myDim_nod2D) +SUBROUTINE mesh_areas(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE o_PARAM + USE o_arrays, only: dum_3d_n + USE g_ROTATE_GRID + use g_comm_auto + IMPLICIT NONE + ! Collects auxilliary information on the mesh + ! Allocated and filled in are: + ! elem_area(myDim_elem2D) + ! 
area(nl, myDim_nod2D) + integer :: n,j,q, elnodes(3), ed(2), elem, nz,nzmin, nzmax + real(kind=WP) :: a(2), b(2), ax, ay, lon, lat, vol, vol2 + real(kind=WP), allocatable,dimension(:) :: work_array + integer, allocatable,dimension(:,:) :: cavity_contribut + real(kind=WP) :: t0, t1 +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit -integer :: n,j,q, elnodes(3), ed(2), elem, nz -real(kind=WP) :: a(2), b(2), ax, ay, lon, lat, vol -real(kind=WP), allocatable,dimension(:) :: work_array -real(kind=WP) :: t0, t1 -type(t_mesh), intent(inout), target :: mesh -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_part_ass.h" -t0=MPI_Wtime() - - allocate(mesh%elem_area(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) - !allocate(elem_area(myDim_elem2D)) - allocate(mesh%area(mesh%nl,myDim_nod2d+eDim_nod2D)) !! Extra size just for simplicity - !! in some further routines - allocate(mesh%area_inv(mesh%nl,myDim_nod2d+eDim_nod2D)) - allocate(mesh%mesh_resolution(myDim_nod2d+eDim_nod2D)) - ! ============ - ! The areas of triangles: - ! ============ - DO n=1, myDim_elem2D - !DO n=1, myDim_elem2D+eDim_elem2D+eXDim_elem2D - elnodes=mesh%elem2D_nodes(:,n) - ay=sum(mesh%coord_nod2D(2,elnodes))/3.0_WP - ay=cos(ay) - if (cartesian) ay=1.0_WP - a = mesh%coord_nod2D(:,elnodes(2))-mesh%coord_nod2D(:,elnodes(1)) - b = mesh%coord_nod2D(:,elnodes(3))-mesh%coord_nod2D(:,elnodes(1)) - call trim_cyclic(a(1)) - call trim_cyclic(b(1)) - a(1)=a(1)*ay - b(1)=b(1)*ay - mesh%elem_area(n)=0.5_WP*abs(a(1)*b(2)-b(1)*a(2)) - END DO - call exchange_elem(mesh%elem_area) - ! ============= - ! Scalar element - ! areas at different levels (there can be partly land) - ! 
============= - - mesh%area=0.0_WP - DO n=1, myDim_nod2D - DO j=1,mesh%nod_in_elem2D_num(n) - elem=mesh%nod_in_elem2D(j,n) - !!PS DO nz=mesh%ulevels(elem),mesh%nlevels(elem)-1 - DO nz=1,mesh%nlevels(elem)-1 - mesh%area(nz,n)=mesh%area(nz,n)+mesh%elem_area(elem)/3.0_WP - END DO - END DO - END DO + t0=MPI_Wtime() + + ! area of triangles + allocate(mesh%elem_area(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) + + ! area of upper edge and lower edge of scalar cell: size nl x node + allocate(mesh%area(mesh%nl,myDim_nod2d+eDim_nod2D)) + + ! "mid" area of scalar cell in case of cavity area \= areasvol, size: nl-1 x node + allocate(mesh%areasvol(mesh%nl,myDim_nod2d+eDim_nod2D)) + + ! area inverse + allocate(mesh%area_inv(mesh%nl,myDim_nod2d+eDim_nod2D)) + allocate(mesh%areasvol_inv(mesh%nl,myDim_nod2d+eDim_nod2D)) + + ! resolution at nodes + allocate(mesh%mesh_resolution(myDim_nod2d+eDim_nod2D)) + + !___compute triangle areas__________________________________________________ + do n=1, myDim_elem2D + elnodes=mesh%elem2D_nodes(:,n) + ay=sum(mesh%coord_nod2D(2,elnodes))/3.0_WP + ay=cos(ay) + if (cartesian) ay=1.0_WP + a = mesh%coord_nod2D(:,elnodes(2))-mesh%coord_nod2D(:,elnodes(1)) + b = mesh%coord_nod2D(:,elnodes(3))-mesh%coord_nod2D(:,elnodes(1)) + call trim_cyclic(a(1)) + call trim_cyclic(b(1)) + a(1)=a(1)*ay + b(1)=b(1)*ay + mesh%elem_area(n)=0.5_WP*abs(a(1)*b(2)-b(1)*a(2)) + end do + call exchange_elem(mesh%elem_area, partit) + + !___compute areas of upper/lower scalar cell edge___________________________ + ! areas at different levels (there can be partly land) + ! --> only areas through which there is exchange are counted + ! + !-----------------------------~+~~~~~~~+~~~ + ! ############################ | | + ! ############################ | | layer k-3 + ! #################### ._______|_______|___area_k-2 + ! ## CAVITY ######## | / / / | | + ! #################### |/ /°/ /| | layer k-2 --> Transport: w_k-2*A_k-1 + ! 
############ ._______|_/_/_/_|_______|___area_k-1 -> A_k-1 lower prisma area defines + ! ############ | | | | scalar area under the cavity + ! ############ | ° | | | layer k-1 + !______________|_______|_______|_______|___area_k + ! | | / / / | | | + ! | |/ /°/ /| | | layer k --> Transport: w_k*A_k + !______|_______|_/_/_/_|_______|_______|___area_k+1 -> A_k upper prisma face area defines + ! | | | | | scalar area of cell + ! | | ° | | | layer k+1 + !______|_______|_______|_______|_______|___area_k+2 + ! #############| | | | + ! #############| ° | | | layer k+2 + ! #############|_______|_______|_______|___area_k+3 + ! #####################| | | + ! #####################| | | layer k+3 + ! ## BOTTOM #########|_______|_______|___area_k+4 + ! #############################| | + ! #############################| | : + ! #############################|_______|___area_k+5 + ! ######################################### + if (use_cavity) then + allocate(cavity_contribut(mesh%nl,myDim_nod2d+eDim_nod2D)) + cavity_contribut = 0 + end if + + mesh%area = 0.0_WP + do n=1, myDim_nod2D+eDim_nod2D + do j=1,mesh%nod_in_elem2D_num(n) + elem=mesh%nod_in_elem2D(j,n) + + !___________________________________________________________________ + ! compute scalar area of prisms at different depth layers. In normal + ! case without cavity the area of the scalar cell corresponds to the + ! area of the upper edge of the prism --> if there is cavity its + ! different. Directly under the cavity the area of scalar cell + ! corresponds to the area of the lower edge + nzmin = mesh%ulevels(elem) + nzmax = mesh%nlevels(elem)-1 + do nz=nzmin,nzmax + mesh%area(nz,n)=mesh%area(nz,n)+mesh%elem_area(elem)/3.0_WP + end do + + !___________________________________________________________________ + ! how many ocean-cavity triangles contribute to an upper edge of a + ! 
scalar area + if (use_cavity) then + do nz=1,nzmin-1 + cavity_contribut(nz,n)=cavity_contribut(nz,n)+1 + end do + end if + end do + end do + + !___compute "mid" scalar cell area__________________________________________ + ! for cavity case: redefine "mid" scalar cell area from upper edge of prism to + ! lower edge of prism if a cavity triangle is present at the upper scalar + ! cell edge + mesh%areasvol = 0.0_WP + if (use_cavity) then + do n = 1, myDim_nod2D+eDim_nod2D + nzmin = mesh%ulevels_nod2d(n) + nzmax = mesh%nlevels_nod2d(n)-1 + do nz=nzmin,nzmax + if (cavity_contribut(nz,n)>0) then + mesh%areasvol(nz,n) = mesh%area(min(nz+1,nzmax),n) + else + mesh%areasvol(nz,n) = mesh%area(nz,n) + end if + end do + end do + deallocate(cavity_contribut) + ! for non cavity case: the "mid" area of the scalar cell always corresponds to + ! the area of the upper scalar cell edge + else + do n = 1, myDim_nod2D+eDim_nod2D + nzmin = mesh%ulevels_nod2d(n) + nzmax = mesh%nlevels_nod2d(n)-1 + do nz=nzmin,nzmax + mesh%areasvol(nz,n) = mesh%area(nz,n) + end do + end do + end if + + ! update to proper dimension + ! coordinates are in radians, edge_dxdy are in meters, + ! and areas are in m^2 + mesh%elem_area = mesh%elem_area*r_earth*r_earth + mesh%area = mesh%area *r_earth*r_earth + mesh%areasvol = mesh%areasvol *r_earth*r_earth - ! Only areas through which there is exchange are counted - - ! =========== - ! Update to proper dimension - ! 
=========== - mesh%elem_area=mesh%elem_area*r_earth*r_earth - mesh%area=mesh%area*r_earth*r_earth + call exchange_nod(mesh%area, partit) + call exchange_nod(mesh%areasvol, partit) + + !___compute inverse area____________________________________________________ + mesh%area_inv = 0.0_WP + do n=1,myDim_nod2d+eDim_nod2D + nzmin = mesh%ulevels_nod2d(n) + nzmax = mesh%nlevels_nod2d(n) + do nz=nzmin,nzmax +!!PS mesh%area_inv(nz,n) = 1._WP/mesh%area(nz,n) + if (mesh%area(nz,n) > 0._WP) then + mesh%area_inv(nz,n) = 1._WP/mesh%area(nz,n) + else + mesh%area_inv(nz,n) = 0._WP + end if + end do + end do + + if (use_cavity) then + mesh%areasvol_inv = 0.0_WP + do n=1,myDim_nod2d+eDim_nod2D + nzmin = mesh%ulevels_nod2d(n) + nzmax = mesh%nlevels_nod2d(n)-1 + do nz=nzmin,nzmax +!!PS mesh%areasvol_inv(nz,n) = 1._WP/mesh%areasvol(nz,n) + if (mesh%areasvol(nz,n) > 0._WP) then + mesh%areasvol_inv(nz,n) = 1._WP/mesh%areasvol(nz,n) + else + mesh%areasvol_inv(nz,n) = 0._WP + end if + end do + end do + else + mesh%areasvol_inv = mesh%area_inv + endif - call exchange_nod(mesh%area) - -do n=1,myDim_nod2d+eDim_nod2D - do nz=1,mesh%nl - if (mesh%area(nz,n) > 0._WP) then - mesh%area_inv(nz,n) = 1._WP/mesh%area(nz,n) - else - mesh%area_inv(nz,n) = 0._WP - end if - end do -end do - ! coordinates are in radians, edge_dxdy are in meters, - ! and areas are in m^2 + !___compute scalar cell resolution__________________________________________ + allocate(work_array(myDim_nod2D)) + !!PS mesh%mesh_resolution=sqrt(mesh%area(1, :)/pi)*2._WP + do n=1,myDim_nod2d+eDim_nod2D + mesh%mesh_resolution(n)=sqrt(mesh%areasvol(mesh%ulevels_nod2d(n),n)/pi)*2._WP + end do + ! 
smooth resolution + do q=1, 3 !apply mass matrix N times to smooth the field + do n=1, myDim_nod2D + vol=0._WP + work_array(n)=0._WP + do j=1, mesh%nod_in_elem2D_num(n) + elem=mesh%nod_in_elem2D(j, n) + elnodes=mesh%elem2D_nodes(:,elem) + work_array(n)=work_array(n)+sum(mesh%mesh_resolution(elnodes))/3._WP*mesh%elem_area(elem) + vol=vol+mesh%elem_area(elem) + end do + work_array(n)=work_array(n)/vol + end do + do n=1,myDim_nod2D + mesh%mesh_resolution(n)=work_array(n) + end do + call exchange_nod(mesh%mesh_resolution, partit) + end do + deallocate(work_array) + + !___compute total ocean areas with/without cavity___________________________ + vol = 0.0_WP + vol2= 0.0_WP + do n=1, myDim_nod2D + vol2=vol2+mesh%area(mesh%ulevels_nod2D(n), n) ! area also under cavity + if (mesh%ulevels_nod2D(n)>1) cycle + vol=vol+mesh%area(1, n) ! area only surface + end do + mesh%ocean_area=0.0 + mesh%ocean_areawithcav=0.0 + call MPI_AllREDUCE(vol, mesh%ocean_area, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & + MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(vol2, mesh%ocean_areawithcav, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & + MPI_COMM_FESOM, MPIerr) + + !___write mesh statistics___________________________________________________ + if (mype==0) then + write(*,*) '____________________________________________________________________' + write(*,*) ' --> mesh statistics:', mype + write(*,*) mype, 'maxArea ',maxval(mesh%elem_area), ' MinArea ', minval(mesh%elem_area) + write(*,*) mype, 'maxScArea ',maxval(mesh%area(1,:)), & + ' MinScArea ', minval(mesh%area(1,:)) + write(*,*) mype, 'Edges: ', mesh%edge2D, ' internal ', mesh%edge2D_in + if (mype==0) then + write(*,*) ' > Total ocean surface area is : ', mesh%ocean_area, ' m^2' + write(*,*) ' > Total ocean surface area wth cavity is: ', mesh%ocean_areawithcav, ' m^2' + end if + endif - allocate(work_array(myDim_nod2D)) - mesh%mesh_resolution=sqrt(mesh%area(1, :)/pi)*2._WP - DO q=1, 3 !apply mass matrix N times to smooth the field - DO n=1, myDim_nod2D - 
vol=0._WP - work_array(n)=0._WP - DO j=1, mesh%nod_in_elem2D_num(n) - elem=mesh%nod_in_elem2D(j, n) - elnodes=mesh%elem2D_nodes(:,elem) - work_array(n)=work_array(n)+sum(mesh%mesh_resolution(elnodes))/3._WP*mesh%elem_area(elem) - vol=vol+mesh%elem_area(elem) - END DO - work_array(n)=work_array(n)/vol - END DO - DO n=1,myDim_nod2D - mesh%mesh_resolution(n)=work_array(n) - ENDDO - call exchange_nod(mesh%mesh_resolution) - END DO - deallocate(work_array) - - vol=0.0_WP - do n=1, myDim_nod2D - vol=vol+mesh%area(1, n) - end do - mesh%ocean_area=0.0 - call MPI_AllREDUCE(vol, mesh%ocean_area, 1, MPI_DOUBLE_PRECISION, MPI_SUM, & - MPI_COMM_FESOM, MPIerr) - -if (mype==0) then - write(*,*) mype, 'Mesh statistics:' - write(*,*) mype, 'maxArea ',maxval(mesh%elem_area), ' MinArea ', minval(mesh%elem_area) - write(*,*) mype, 'maxScArea ',maxval(mesh%area(1,:)), & - ' MinScArea ', minval(mesh%area(1,:)) - write(*,*) mype, 'Edges: ', mesh%edge2D, ' internal ', mesh%edge2D_in - if (mype==0) then - write(*,*) 'Total ocean area is: ', mesh%ocean_area, ' m^2' - end if -endif - -t1=MPI_Wtime() -if (mype==0) then - write(*,*) 'mesh_areas finished in ', t1-t0, ' seconds' - write(*,*) '=========================' -endif + t1=MPI_Wtime() + if (mype==0) then + write(*,*) ' > mesh_areas finished in ', t1-t0, ' seconds' + endif END SUBROUTINE mesh_areas !=================================================================== -SUBROUTINE mesh_auxiliary_arrays(mesh) +SUBROUTINE mesh_auxiliary_arrays(partit, mesh) ! Collects auxiliary information needed to speed up computations ! of gradients, div. This also makes implementation of cyclicity ! much more straightforward @@ -1949,9 +2392,9 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) ! 
coriolis(myDim_elem2D) USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP USE o_PARAM -USE i_PARAM -USE g_PARSUP USE o_ARRAYS USE g_ROTATE_grid use g_comm_auto @@ -1966,10 +2409,11 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) real(kind=WP), allocatable :: center_x(:), center_y(:), temp(:) real(kind=WP) :: t0, t1 integer :: i, nn, ns -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit -!NR Cannot include the pointers before the targets are allocated... -!NR #include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_part_ass.h" t0=MPI_Wtime() allocate(mesh%edge_dxdy(2,myDim_edge2D+eDim_edge2D)) @@ -1978,8 +2422,8 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) allocate(mesh%gradient_vec(6,myDim_elem2D)) allocate(mesh%metric_factor(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) allocate(mesh%elem_cos(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) - allocate(coriolis(myDim_elem2D)) - allocate(coriolis_node(myDim_nod2D+eDim_nod2D)) + allocate(mesh%coriolis(myDim_elem2D)) + allocate(mesh%coriolis_node(myDim_nod2D+eDim_nod2D)) allocate(mesh%geo_coord_nod2D(2,myDim_nod2D+eDim_nod2D)) allocate(center_x(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) allocate(center_y(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) @@ -1990,7 +2434,7 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) ! ============ DO n=1,myDim_nod2D+eDim_nod2D call r2g(lon, lat, mesh%coord_nod2D(1,n), mesh%coord_nod2D(2,n)) - coriolis_node(n)=2*omega*sin(lat) + mesh%coriolis_node(n)=2*omega*sin(lat) END DO DO n=1,myDim_nod2D+eDim_nod2D @@ -2006,11 +2450,11 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) DO n=1,myDim_elem2D call elem_center(n, ax, ay, mesh) call r2g(lon, lat, ax, ay) - coriolis(n)=2*omega*sin(lat) + mesh%coriolis(n)=2*omega*sin(lat) END DO if(fplane) then - coriolis=2*omega*0.71_WP + mesh%coriolis=2*omega*0.71_WP end if ! 
============ @@ -2024,10 +2468,10 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) mesh%metric_factor=tan(ay)/r_earth END DO - call exchange_elem(mesh%metric_factor) - call exchange_elem(mesh%elem_cos) - call exchange_elem(center_x) - call exchange_elem(center_y) + call exchange_elem(mesh%metric_factor, partit) + call exchange_elem(mesh%elem_cos, partit) + call exchange_elem(center_x, partit) + call exchange_elem(center_y, partit) if (cartesian) then mesh%elem_cos=1.0_WP mesh%metric_factor=0.0_WP @@ -2242,16 +2686,16 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) END DO deallocate(center_y, center_x) - !array of 2D boundary conditions is used in ice_maEVP - if (whichEVP > 0) then - allocate(mesh%bc_index_nod2D(myDim_nod2D+eDim_nod2D)) - mesh%bc_index_nod2D=1._WP - do n=1, myDim_edge2D - ed=mesh%edges(:, n) - if (myList_edge2D(n) <= mesh%edge2D_in) cycle - mesh%bc_index_nod2D(ed)=0._WP - end do - end if +! !array of 2D boundary conditions is used in ice_maEVP +! if (whichEVP > 0) then +! allocate(mesh%bc_index_nod2D(myDim_nod2D+eDim_nod2D)) +! mesh%bc_index_nod2D=1._WP +! do n=1, myDim_edge2D +! ed=mesh%edges(:, n) +! if (myList_edge2D(n) <= mesh%edge2D_in) cycle +! mesh%bc_index_nod2D(ed)=0._WP +! end do +! end if #if defined (__oasis) nn=0 @@ -2262,10 +2706,10 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) do i=1, myDim_nod2D if (mesh%geo_coord_nod2D(2, i) > 0) then nn=nn+1 - mesh%lump2d_north(i)=mesh%area(1, i) + mesh%lump2d_north(i)=mesh%areasvol(mesh%ulevels_nod2d(i), i) else ns=ns+1 - mesh%lump2d_south(i)=mesh%area(1, i) + mesh%lump2d_south(i)=mesh%area(mesh%ulevels_nod2d(i), i) end if end do @@ -2291,13 +2735,14 @@ SUBROUTINE mesh_auxiliary_arrays(mesh) endif END SUBROUTINE mesh_auxiliary_arrays - -!=================================================================== - -SUBROUTINE check_mesh_consistency(mesh) +! +! 
+!_______________________________________________________________________________ +SUBROUTINE check_mesh_consistency(partit, mesh) USE MOD_MESH +USE MOD_PARTIT +USE MOD_PARSUP USE o_PARAM -USE g_PARSUP USE g_ROTATE_GRID use g_comm_auto IMPLICIT NONE @@ -2305,10 +2750,12 @@ SUBROUTINE check_mesh_consistency(mesh) ! Allocated and filled in are: ! elem_area(myDim_elem2D) ! area(nl, myDim_nod2D) -type(t_mesh), intent(inout), target :: mesh +type(t_mesh), intent(inout), target :: mesh +type(t_partit), intent(inout), target :: partit integer :: nz, n, elem , elnodes(3) real(kind=WP) :: vol_n(mesh%nl), vol_e(mesh%nl), aux(mesh%nl) - +#include "associate_part_def.h" +#include "associate_part_ass.h" vol_n=0._WP vol_e=0._WP @@ -2316,7 +2763,7 @@ SUBROUTINE check_mesh_consistency(mesh) aux=0._WP do n=1, myDim_nod2D do nz=mesh%ulevels_nod2D(n), mesh%nlevels_nod2D(n)-1 - aux(nz)=aux(nz)+mesh%area(nz, n) + aux(nz)=aux(nz)+mesh%areasvol(nz, n) end do end do call MPI_AllREDUCE(aux, vol_n, mesh%nl, MPI_DOUBLE_PRECISION, MPI_SUM, & @@ -2326,7 +2773,7 @@ SUBROUTINE check_mesh_consistency(mesh) do elem=1, myDim_elem2D elnodes=mesh%elem2D_nodes(:, elem) if (elnodes(1) > myDim_nod2D) CYCLE - do nz=mesh%ulevels(elem), mesh%nlevels(elem) + do nz=mesh%ulevels(elem), mesh%nlevels(elem)-1 aux(nz)=aux(nz)+mesh%elem_area(elem) end do end do @@ -2341,7 +2788,65 @@ SUBROUTINE check_mesh_consistency(mesh) write(*,*) '***end level area_test***' end if -!call par_ex +!call par_ex(partit%MPI_COMM_FESOM, partit%mype) !stop END SUBROUTINE check_mesh_consistency -!================================================================== +! +! 
+!_______________________________________________________________________________ +subroutine check_total_volume(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE o_PARAM + use g_comm_auto + use o_ARRAYS + + IMPLICIT NONE + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: nz, n, elem , elnodes(3) + real(kind=WP) :: vol_n, vol_e, aux + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !___________________________________________________________________________ + vol_n=0._WP + vol_e=0._WP + !___________________________________________________________________________ + ! total ocean volume on nodes + aux=0._WP + do n=1, myDim_nod2D + do nz=ulevels_nod2D(n), nlevels_nod2D(n)-1 + aux=aux+areasvol(nz, n)*hnode(nz,n) + end do + end do + call MPI_AllREDUCE(aux, vol_n, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + !___________________________________________________________________________ + ! total ocean volume on elements + aux=0._WP + do elem=1, myDim_elem2D + elnodes=elem2D_nodes(:, elem) + if (elnodes(1) > myDim_nod2D) cycle + do nz=ulevels(elem), nlevels(elem)-1 + aux=aux+elem_area(elem)*helem(nz,elem) + end do + end do + call MPI_AllREDUCE(aux, vol_e, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + + !___write mesh statistics___________________________________________________ + if (mype==0) then + write(*,*) '____________________________________________________________________' + write(*,*) ' --> ocean volume check:', mype + write(*,*) ' > Total ocean volume node:', vol_n, ' m^3' + write(*,*) ' > Total ocean volume elem:', vol_e, ' m^3' + + end if + +end subroutine check_total_volume +! +! 
+!_______________________________________________________________________________ diff --git a/src/oce_mo_conv.F90 b/src/oce_mo_conv.F90 index f9ddac0c4..7045794d2 100644 --- a/src/oce_mo_conv.F90 +++ b/src/oce_mo_conv.F90 @@ -1,27 +1,40 @@ ! ! !_______________________________________________________________________________ -subroutine mo_convect(mesh) +subroutine mo_convect(ice, partit, mesh) USE o_PARAM USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_ICE USE o_ARRAYS - USE g_PARSUP USE g_config - use i_arrays use g_comm_auto IMPLICIT NONE - - integer :: node, elem, nz, elnodes(3), nzmin, nzmax - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + type(t_ice), intent(in), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: node, elem, nz, elnodes(3), nzmin, nzmax + !___________________________________________________________________________ + ! pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: u_ice, v_ice, a_ice +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + !___________________________________________________________________________ ! add vertical mixing scheme of Timmermann and Beckmann, 2004,"Parameterization ! of vertical mixing in the Weddell Sea! ! Computes the mixing length derived from the Monin if (use_momix) then - mo = 0._WP +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax) do node=1, myDim_nod2D+eDim_nod2D + mo(:, node) = 0._WP nzmax = nlevels_nod2d(node) nzmin = ulevels_nod2d(node) !___________________________________________________________________ @@ -53,11 +66,12 @@ subroutine mo_convect(mesh) end if end do end do +!$OMP END PARALLEL DO end if - ! 
!___________________________________________________________________________ ! apply mixing enhancements to vertical diffusivity +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(node, nz, nzmin, nzmax) do node=1, myDim_nod2D+eDim_nod2D nzmax = nlevels_nod2d(node) nzmin = ulevels_nod2d(node) @@ -73,11 +87,12 @@ subroutine mo_convect(mesh) end do end do - +!$OMP END PARALLEL DO ! !___________________________________________________________________________ ! apply mixing enhancements to vertical viscosity ! elem2D_nodes has no dimension until +eDim_elem2D +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nz, nzmin, nzmax) do elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) nzmax = nlevels(elem) @@ -98,7 +113,7 @@ subroutine mo_convect(mesh) if (use_windmix .and. nz<=windmix_nl+1) Av(nz,elem)=max(Av(nz,elem), windmix_kv) end do end do - !!PS call exchange_elem(Av) +!$OMP END PARALLEL DO end subroutine mo_convect ! ! diff --git a/src/oce_modules.F90 b/src/oce_modules.F90 index 6ac8506bc..6c36aa257 100755 --- a/src/oce_modules.F90 +++ b/src/oce_modules.F90 @@ -23,14 +23,7 @@ MODULE o_PARAM real(kind=WP) :: C_d= 0.0025_WP ! Bottom drag coefficient real(kind=WP) :: kappa=0.4 !von Karman's constant real(kind=WP) :: mix_coeff_PP=0.01_WP ! mixing coef for PP scheme -real(kind=WP) :: gamma0=0.01! [m/s], gamma0*len*dt is the background viscosity -real(kind=WP) :: gamma1=0.1! [non dim.], or computation of the flow aware viscosity -real(kind=WP) :: gamma2=10.! [s/m], is only used in easy backscatter option -real(kind=WP) :: Div_c =1.0_WP !modified Leith viscosity weight -real(kind=WP) :: Leith_c=1.0_WP !Leith viscosity weight. It needs vorticity! -real(kind=WP) :: easy_bs_return=1.0 !backscatter option only (how much to return) real(kind=WP) :: A_ver=0.001_WP ! Vertical harm. visc. -integer :: visc_option=5 logical :: uke_scaling=.true. real(kind=WP) :: uke_scaling_factor=1._WP real(kind=WP) :: rosb_dis=1._WP @@ -81,12 +74,6 @@ MODULE o_PARAM ! 
elevation and divergence real(kind=WP) :: epsilon=0.1_WP ! AB2 offset ! Tracers -logical :: i_vert_diff= .true. -logical :: i_vert_visc= .true. -character(20) :: tra_adv_ver, tra_adv_hor, tra_adv_lim -real(kind=WP) :: tra_adv_ph, tra_adv_pv -logical :: w_split =.false. -real(kind=WP) :: w_max_cfl=1.e-5_WP logical :: SPP=.false. @@ -96,18 +83,15 @@ MODULE o_PARAM integer, allocatable, dimension(:) :: ind2 END TYPE tracer_source3d_type -integer :: num_tracers=2 -integer, dimension(100) :: tracer_ID = RESHAPE((/0, 1/), (/100/), (/0/)) ! ID for each tracer for treating the initialization and surface boundary condition - ! 0=temp, 1=salt etc. type(tracer_source3d_type), & allocatable, dimension(:) :: ptracers_restore integer :: ptracers_restore_total=0 ! Momentum -logical :: free_slip=.false. - ! false=no slip -integer :: mom_adv=2 +!!PS logical :: free_slip=.false. +!!PS ! false=no slip +!!PS integer :: mom_adv=2 ! 1 vector control volumes, p1 velocities ! 2 scalar control volumes ! 3 vector invariant @@ -134,17 +118,16 @@ MODULE o_PARAM real(kind=WP) :: windmix_kv = 1.e-3 integer :: windmix_nl = 2 -! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). -logical :: smooth_bh_tra = .false. -real(kind=WP) :: gamma0_tra = 0.0005 -real(kind=WP) :: gamma1_tra = 0.0125 -real(kind=WP) :: gamma2_tra = 0. !_______________________________________________________________________________ ! use non-constant reference density if .false. density_ref=density_0 logical :: use_density_ref = .false. real(kind=WP) :: density_ref_T = 2.0_WP real(kind=WP) :: density_ref_S = 34.0_WP +!_______________________________________________________________________________ +! use k-profile nonlocal fluxes +logical :: use_kpp_nonlclflx = .false. + !_______________________________________________________________________________ ! *** active tracer cutoff logical :: limit_salinity=.true. 
!set an allowed range for salinity @@ -168,151 +151,74 @@ MODULE o_PARAM character(20) :: which_pgf='shchepetkin' - NAMELIST /oce_dyn/ state_equation, C_d, A_ver, gamma0, gamma1, gamma2, Leith_c, Div_c, easy_bs_return, & - scale_area, mom_adv, free_slip, i_vert_visc, w_split, w_max_cfl, SPP,& + NAMELIST /oce_dyn/ state_equation, C_d, A_ver, & + scale_area, SPP,& Fer_GM, K_GM_max, K_GM_min, K_GM_bvref, K_GM_resscalorder, K_GM_rampmax, K_GM_rampmin, & scaling_Ferreira, scaling_Rossby, scaling_resolution, scaling_FESOM14, & - Redi, visc_sh_limit, mix_scheme, Ricr, concv, which_pgf, visc_option, alpha, theta, use_density_ref, & + Redi, visc_sh_limit, mix_scheme, Ricr, concv, which_pgf, alpha, theta, use_density_ref, & K_back, c_back, uke_scaling, uke_scaling_factor, smooth_back, smooth_dis, & smooth_back_tend, rosb_dis - NAMELIST /oce_tra/ diff_sh_limit, Kv0_const, double_diffusion, K_ver, K_hor, surf_relax_T, surf_relax_S, & - balance_salt_water, clim_relax, ref_sss_local, ref_sss, i_vert_diff, tra_adv_ver, tra_adv_hor, & - tra_adv_lim, tra_adv_ph, tra_adv_pv, num_tracers, tracer_ID, & + NAMELIST /tracer_phys/ diff_sh_limit, Kv0_const, double_diffusion, K_ver, K_hor, surf_relax_T, surf_relax_S, & + balance_salt_water, clim_relax, ref_sss_local, ref_sss, & use_momix, momix_lat, momix_kv, & use_instabmix, instabmix_kv, & use_windmix, windmix_kv, windmix_nl, & - smooth_bh_tra, gamma0_tra, gamma1_tra, gamma2_tra + use_kpp_nonlclflx END MODULE o_PARAM -!========================================================== - -!========================================================== -MODULE o_MESH -USE o_PARAM -USE, intrinsic :: ISO_FORTRAN_ENV -! All variables used to keep the mesh structure + -! auxiliary variables involved in implementation -! of open boundaries and advection schemes -! -! The fct part -real(kind=WP),allocatable,dimension(:,:) :: fct_LO ! Low-order solution -real(kind=WP),allocatable,dimension(:,:) :: adv_flux_hor ! Antidif. horiz. contrib. 
from edges / backup for iterafive fct scheme -real(kind=WP),allocatable,dimension(:,:) :: adv_flux_ver ! Antidif. vert. fluxes from nodes / backup for iterafive fct scheme - -real(kind=WP),allocatable,dimension(:,:) :: fct_ttf_max,fct_ttf_min -real(kind=WP),allocatable,dimension(:,:) :: fct_plus,fct_minus -! Quadratic reconstruction part -integer,allocatable,dimension(:) :: nn_num, nboundary_lay -real(kind=WP),allocatable,dimension(:,:,:) :: quad_int_mat, quad_int_coef -integer,allocatable,dimension(:,:) :: nn_pos -! MUSCL type reconstruction -integer,allocatable,dimension(:,:) :: edge_up_dn_tri -real(kind=WP),allocatable,dimension(:,:,:) :: edge_up_dn_grad -end module o_MESH -!========================================================== - !========================================================== MODULE o_ARRAYS USE o_PARAM IMPLICIT NONE ! Arrays are described in subroutine array_setup -real(kind=WP), allocatable, target :: Wvel(:,:), Wvel_e(:,:), Wvel_i(:,:) -real(kind=WP), allocatable :: UV(:,:,:) -real(kind=WP), allocatable :: UV_rhs(:,:,:), UV_rhsAB(:,:,:) real(kind=WP), allocatable :: uke(:,:), v_back(:,:), uke_back(:,:), uke_dis(:,:), uke_dif(:,:) real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) real(kind=WP), allocatable :: UV_dis_posdef_b2(:,:), UV_dis_posdef(:,:), UV_back_posdef(:,:) -real(kind=WP), allocatable :: eta_n(:), d_eta(:) -real(kind=WP), allocatable :: ssh_rhs(:), hpressure(:,:) -real(kind=WP), allocatable :: CFL_z(:,:) +real(kind=WP), allocatable :: hpressure(:,:) real(kind=WP), allocatable :: stress_surf(:,:) +real(kind=WP), allocatable :: stress_node_surf(:,:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_x(:) REAL(kind=WP), ALLOCATABLE :: stress_atmoce_y(:) -real(kind=WP), allocatable :: T_rhs(:,:) real(kind=WP), allocatable :: heat_flux(:), Tsurf(:) real(kind=WP), allocatable :: heat_flux_in(:) !to keep the 
unmodified (by SW penetration etc.) heat flux -real(kind=WP), allocatable :: S_rhs(:,:) -real(kind=WP), allocatable :: tr_arr(:,:,:),tr_arr_old(:,:,:) -real(kind=WP), allocatable :: del_ttf(:,:) -real(kind=WP), allocatable :: del_ttf_advhoriz(:,:),del_ttf_advvert(:,:) !!PS ,del_ttf_diff(:,:) - real(kind=WP), allocatable :: water_flux(:), Ssurf(:) real(kind=WP), allocatable :: virtual_salt(:), relax_salt(:) real(kind=WP), allocatable :: Tclim(:,:), Sclim(:,:) -real(kind=WP), allocatable :: Visc(:,:) +!!PS real(kind=WP), allocatable :: Visc(:,:) real(kind=WP), allocatable :: Tsurf_t(:,:), Ssurf_t(:,:) real(kind=WP), allocatable :: tau_x_t(:,:), tau_y_t(:,:) real(kind=WP), allocatable :: heat_flux_t(:,:), heat_rel_t(:,:), heat_rel(:) -real(kind=WP), allocatable :: coriolis(:), coriolis_node(:) +!!PS real(kind=WP), allocatable :: coriolis(:), coriolis_node(:) real(kind=WP), allocatable :: relax2clim(:) real(kind=WP), allocatable :: MLD1(:), MLD2(:) integer, allocatable :: MLD1_ind(:), MLD2_ind(:) real(kind=WP), allocatable :: ssh_gp(:) -! Passive and age tracers -real(kind=WP), allocatable :: tracer(:,:,:), tracer_rhs(:,:,:) !Tracer gradients&RHS -real(kind=WP), allocatable :: ttrhs(:,:) real(kind=WP), allocatable :: tr_xy(:,:,:) real(kind=WP), allocatable :: tr_z(:,:) -! Auxiliary arrays for vector-invariant form of momentum advection -real(kind=WP), allocatable,dimension(:,:) :: vorticity - !Viscosity and diff coefs real(kind=WP), allocatable,dimension(:,:) :: Av,Kv real(kind=WP), allocatable,dimension(:,:,:) :: Kv_double real(kind=WP), allocatable,dimension(:) :: Kv0 !Velocities interpolated to nodes -real(kind=WP), allocatable,dimension(:,:,:) :: Unode +!!PS real(kind=WP), allocatable,dimension(:,:,:) :: Unode ! 
Auxiliary arrays to store Redi-GM fields real(kind=WP), allocatable,dimension(:,:,:) :: neutral_slope real(kind=WP), allocatable,dimension(:,:,:) :: slope_tapered real(kind=WP), allocatable,dimension(:,:,:) :: sigma_xy real(kind=WP), allocatable,dimension(:,:) :: sw_beta, sw_alpha +real(kind=WP), allocatable,dimension(:) :: dens_flux !real(kind=WP), allocatable,dimension(:,:,:) :: tsh, tsv, tsh_nodes !real(kind=WP), allocatable,dimension(:,:) :: hd_flux,vd_flux !Isoneutral diffusivities (or xy diffusivities if Redi=.false) real(kind=WP), allocatable :: Ki(:,:) -!_______________________________________________________________________________ -! Arrays added for ALE implementation: -! --> layer thinkness at node and depthlayer for t=n and t=n+1 -real(kind=WP), allocatable,dimension(:,:) :: hnode, hnode_new, zbar_3d_n, Z_3d_n - -! --> layer thinkness at elements, interpolated from hnode -real(kind=WP), allocatable,dimension(:,:) :: helem - -! --> thinkness of bottom elem (important for partial cells) -real(kind=WP), allocatable,dimension(:) :: bottom_elem_thickness -real(kind=WP), allocatable,dimension(:) :: bottom_node_thickness - -! --> The increment of total fluid depth on elements. It is used to update the matrix -real(kind=WP), allocatable,dimension(:) :: dhe - -! --> hbar, hbar_old: correspond to the elevation, but on semi-integer time steps. -real(kind=WP), allocatable,dimension(:) :: hbar, hbar_old - ! --> auxiliary array to store an intermediate part of the rhs computations. real(kind=WP), allocatable,dimension(:) :: ssh_rhs_old !, ssh_rhs_old2 !PS - -! --> auxiliary array to store depth of layers and depth of mid level due to changing -! layer thinkness at every node -real(kind=WP), allocatable,dimension(:) :: zbar_n, Z_n - -! new bottom depth at node and element due to partial cells -real(kind=WP), allocatable,dimension(:) :: zbar_n_bot -real(kind=WP), allocatable,dimension(:) :: zbar_e_bot - -! 
new depth of cavity-ocean interface at node and element due to partial cells -real(kind=WP), allocatable,dimension(:) :: zbar_n_srf -real(kind=WP), allocatable,dimension(:) :: zbar_e_srf - -! --> multiplication factor for surface boundary condition in -! diff_ver_part_impl_ale(tr_num) between linfs -->=0.0 and noninfs -! (zlevel,zstar...) --> = 1.0 real(kind=WP) :: is_nonlinfs !_______________________________________________________________________________ @@ -325,7 +231,7 @@ MODULE o_ARRAYS !_______________________________________________________________________________ !!PS ! dummy arrays -!!PS real(kind=WP), allocatable,dimension(:,:) :: dum_3d_n, dum_3d_e +real(kind=WP), allocatable,dimension(:,:) :: dum_3d_n !, dum_3d_e !!PS real(kind=WP), allocatable,dimension(:) :: dum_2d_n, dum_2d_e !_______________________________________________________________________________ @@ -334,14 +240,8 @@ MODULE o_ARRAYS !GM_stuff real(kind=WP),allocatable :: bvfreq(:,:),mixlay_dep(:),bv_ref(:) -real(kind=WP), allocatable :: fer_UV(:,:,:), fer_wvel(:,:) real(kind=WP), target, allocatable :: fer_c(:), fer_scal(:), fer_K(:,:), fer_gamma(:,:,:) real(kind=WP), allocatable :: ice_rejected_salt(:) - -!_______________________________________________________________________________ -! in case ldiag_DVD=.true. 
--> calculate discrete variance decay (DVD) -real(kind=WP), allocatable :: tr_dvd_horiz(:,:,:),tr_dvd_vert(:,:,:) - END MODULE o_ARRAYS !========================================================== diff --git a/src/oce_muscl_adv.F90 b/src/oce_muscl_adv.F90 index c94085117..2a1270f7f 100755 --- a/src/oce_muscl_adv.F90 +++ b/src/oce_muscl_adv.F90 @@ -1,8 +1,13 @@ module find_up_downwind_triangles_interface interface - subroutine find_up_downwind_triangles(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh + subroutine find_up_downwind_triangles(twork, partit, mesh) + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer_work), intent(inout), target :: twork end subroutine end interface end module @@ -24,28 +29,37 @@ subroutine find_up_downwind_triangles(mesh) ! find_up_downwind_triangles ! fill_up_dn_grad ! adv_tracer_muscl -subroutine muscl_adv_init(mesh) +subroutine muscl_adv_init(twork, partit, mesh) use MOD_MESH - use O_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER use o_ARRAYS use o_PARAM - use g_PARSUP use g_comm_auto use g_config use find_up_downwind_triangles_interface IMPLICIT NONE - integer :: n, k, n1, n2, n_num - integer :: nz - type(t_mesh), intent(in) , target :: mesh + integer :: n, k, n1, n2 -#include "associate_mesh.h" + type(t_mesh), intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer_work), intent(inout), target :: twork + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! 
find upwind and downwind triangle for each local edge - call find_up_downwind_triangles(mesh) + call find_up_downwind_triangles(twork, partit, mesh) !___________________________________________________________________________ - n_num=0 + nn_size=0 + k=0 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n) +!$OMP DO REDUCTION(max: k) do n=1, myDim_nod2D ! get number of neighbouring nodes from sparse stiffness matrix ! stiffnes matrix filled up in subroutine init_stiff_mat_ale @@ -55,78 +69,103 @@ subroutine muscl_adv_init(mesh) ! next value switches to a new row ! --> SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) gives maximum number of ! neighbouring nodes within a single row of the sparse matrix - k=SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n) - if(k>n_num) n_num=k ! nnum maximum number of neighbouring nodes + k=max(k, SSH_stiff%rowptr(n+1)-SSH_stiff%rowptr(n)) end do - +!$OMP END DO +!$OMP END PARALLEL + nn_size=k !___________________________________________________________________________ - allocate(nn_num(myDim_nod2D), nn_pos(n_num,myDim_nod2D)) + allocate(mesh%nn_num(myDim_nod2D), mesh%nn_pos(nn_size,myDim_nod2D)) + nn_num(1:myDim_nod2D) => mesh%nn_num(:) + nn_pos(1:nn_size, 1:myDim_nod2D) => mesh%nn_pos(:,:) ! These are the same arrays that we also use in quadratic reconstruction !MOVE IT TO SOMEWHERE ELSE - do n=1,myDim_nod2d +!$OMP PARALLEL DO + do n=1, myDim_nod2d ! number of neigbouring nodes to node n nn_num(n)=1 ! local position of neigbouring nodes nn_pos(1,n)=n end do - +!$OMP END PARALLEL DO !___________________________________________________________________________ - allocate(nboundary_lay(myDim_nod2D+eDim_nod2D)) !node n becomes a boundary node after layer nboundary_lay(n) - nboundary_lay=nl-1 + allocate(twork%nboundary_lay(myDim_nod2D+eDim_nod2D)) !node n becomes a boundary node after layer twork%nboundary_lay(n) + twork%nboundary_lay=nl-1 +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, k, n1, n2) +!$OMP DO do n=1, myDim_edge2D ! 
n1 and n2 are local indices n1=edges(1,n) n2=edges(2,n) + +#if defined(__openmp_reproducible) +!$OMP ORDERED +#endif + ! ... if(n1<=myDim_nod2D) --> because dont use extended nodes if(n1<=myDim_nod2D) then +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(n1)) +#endif nn_pos(nn_num(n1)+1,n1)=n2 nn_num(n1)=nn_num(n1)+1 +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(n1)) +#endif end if ! ... if(n2<=myDim_nod2D) --> because dont use extended nodes if(n2<=myDim_nod2D) then +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock(partit%plock(n2)) +#endif nn_pos(nn_num(n2)+1,n2)=n1 nn_num(n2)=nn_num(n2)+1 +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(n2)) +#endif end if if (any(edge_tri(:,n)<=0)) then ! this edge nodes is already at the surface at the boundary ... - ! later here ...sign(1, nboundary_lay(enodes(1))-nz) for nz=1 must be negativ - ! thats why here nboundary_lay(edges(:,n))=0 - nboundary_lay(edges(:,n))=0 + ! later here ...sign(1, twork%nboundary_lay(enodes(1))-nz) for nz=1 must be negativ + ! thats why here twork%nboundary_lay(edges(:,n))=0 + twork%nboundary_lay(edges(:,n))=0 else ! this edge nodes become boundary edge with increasing depth due to bottom topography - ! at the depth nboundary_lay the edge (edgepoints) still has two valid ocean triangles + ! at the depth twork%nboundary_lay the edge (edgepoints) still has two valid ocean triangles ! 
below that depth, edge becomes boundary edge - nboundary_lay(edges(1,n))=min(nboundary_lay(edges(1,n)), minval(nlevels(edge_tri(:,n)))-1) - nboundary_lay(edges(2,n))=min(nboundary_lay(edges(2,n)), minval(nlevels(edge_tri(:,n)))-1) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_set_lock (partit%plock(edges(1,n))) +#endif + twork%nboundary_lay(edges(1,n))=min(twork%nboundary_lay(edges(1,n)), minval(nlevels(edge_tri(:,n)))-1) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(edges(1,n))) + call omp_set_lock (partit%plock(edges(2,n))) +#endif + twork%nboundary_lay(edges(2,n))=min(twork%nboundary_lay(edges(2,n)), minval(nlevels(edge_tri(:,n)))-1) +#if defined(_OPENMP) && !defined(__openmp_reproducible) + call omp_unset_lock(partit%plock(edges(2,n))) +#endif end if - end do - -!!PS !___________________________________________________________________________ -!!PS --> is transfered to oce_mesh.F90 --> subroutine find_levels_min_e2n(mesh) -!!PS --> can be deleted here! -!!PS allocate(mesh%nlevels_nod2D_min(myDim_nod2D+eDim_nod2D)) -!!PS allocate(mesh%ulevels_nod2D_min(myDim_nod2D+eDim_nod2D)) -!!PS do n=1, myDim_nod2d -!!PS k=nod_in_elem2D_num(n) -!!PS ! minimum depth in neigbouring elements around node n -!!PS mesh%nlevels_nod2D_min(n)=minval(nlevels(nod_in_elem2D(1:k, n))) -!!PS mesh%ulevels_nod2D_max(n)=maxval(ulevels(nod_in_elem2D(1:k, n))) -!!PS end do -!!PS call exchange_nod(mesh%nlevels_nod2D_min) -!!PS call exchange_nod(mesh%ulevels_nod2D_min) +#if defined(__openmp_reproducible) +!$OMP END ORDERED +#endif + end do +!$OMP END DO +!$OMP END PARALLEL end SUBROUTINE muscl_adv_init ! ! 
!_______________________________________________________________________________ -SUBROUTINE find_up_downwind_triangles(mesh) +SUBROUTINE find_up_downwind_triangles(twork, partit, mesh) USE MOD_MESH -USE O_MESH +USE MOD_PARTIT +USE MOD_PARSUP +USE MOD_TRACER USE o_ARRAYS USE o_PARAM -USE g_PARSUP USE g_CONFIG use g_comm_auto IMPLICIT NONE @@ -135,25 +174,41 @@ SUBROUTINE find_up_downwind_triangles(mesh) real(kind=WP), allocatable :: coord_elem(:, :,:), temp(:) integer, allocatable :: temp_i(:), e_nodes(:,:) -type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" +type(t_mesh), intent(in) , target :: mesh +type(t_partit), intent(inout), target :: partit +type(t_tracer_work), intent(inout), target :: twork +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" -allocate(edge_up_dn_tri(2,myDim_edge2D)) -allocate(edge_up_dn_grad(4,nl-1,myDim_edge2D)) -edge_up_dn_tri=0 +allocate(twork%edge_up_dn_tri(2,myDim_edge2D)) +allocate(twork%edge_up_dn_grad(4,nl-1,myDim_edge2D)) +twork%edge_up_dn_tri=0 ! ===== ! In order that this procedure works, we need to know nodes and their coordinates ! on the extended set of elements (not only my, but myDim+eDim+eXDim) ! 
===== -allocate(coord_elem(2,3,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) +allocate(coord_elem(2, 3, myDim_elem2D+eDim_elem2D+eXDim_elem2D)) allocate(temp(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) DO n=1,3 DO k=1,2 - do el=1,myDim_elem2D +!$OMP PARALLEL +!$OMP DO + DO el=1,myDim_elem2D temp(el)=coord_nod2D(k,elem2D_nodes(n,el)) - end do - call exchange_elem(temp) - coord_elem(k,n,:)=temp(:) + END DO +!$OMP END DO +!$OMP MASTER + call exchange_elem(temp, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO + DO el=1, myDim_elem2D+eDim_elem2D+eXDim_elem2D + coord_elem(k,n,el)=temp(el) + END DO +!$OMP END DO +!$OMP END PARALLEL END DO END DO deallocate(temp) @@ -161,15 +216,27 @@ SUBROUTINE find_up_downwind_triangles(mesh) allocate(e_nodes(3,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) allocate(temp_i(myDim_elem2D+eDim_elem2D+eXDim_elem2D)) DO n=1,3 +!$OMP PARALLEL +!$OMP DO do el=1,myDim_elem2D temp_i(el)=myList_nod2D(elem2D_nodes(n,el)) end do - call exchange_elem(temp_i) - e_nodes(n,:)=temp_i(:) +!$OMP END DO +!$OMP MASTER + call exchange_elem(temp_i, partit) +!$OMP END MASTER +!$OMP BARRIER +!$OMP DO + DO el=1, myDim_elem2D+eDim_elem2D+eXDim_elem2D + e_nodes(n, el)=temp_i(el) + END DO +!$OMP END DO +!$OMP END PARALLEL END DO deallocate(temp_i) - +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(n, k, ednodes, elem, el, x,b, c, cr, bx, by, xx, xy, ab, ax) +!$OMP DO DO n=1, myDim_edge2d ednodes=edges(:,n) x=coord_nod2D(:,ednodes(2))-coord_nod2D(:,ednodes(1)) @@ -208,18 +275,18 @@ SUBROUTINE find_up_downwind_triangles(mesh) ! Since b and c are the sides of triangle, |ab|0.0_WP).and.(ax>0.0_WP).and.(axab)) then - edge_up_dn_tri(1,n)=elem + twork%edge_up_dn_tri(1,n)=elem cycle endif if((ab==ax).or.(ax==0.0_WP)) then - edge_up_dn_tri(1,n)=elem + twork%edge_up_dn_tri(1,n)=elem cycle endif -END DO + END DO ! Find downwind element x=-x DO k=1,nod_in_elem2D_num(ednodes(2)) @@ -251,59 +318,68 @@ SUBROUTINE find_up_downwind_triangles(mesh) ! 
Since b and c are the sides of triangle, |ab|0.0_WP).and.(ax>0.0_WP).and.(axab)) then - edge_up_dn_tri(2,n)=elem + twork%edge_up_dn_tri(2,n)=elem cycle endif if((ab==ax).or.(ax==0.0)) then - edge_up_dn_tri(2,n)=elem + twork%edge_up_dn_tri(2,n)=elem cycle endif END DO END DO +!$OMP END DO +!$OMP END PARALLEL + ! For edges touching the boundary --- up or downwind elements may be absent. ! We return to the standard Miura at nodes that ! belong to such edges. Same at the depth. ! Count the number of 'good' edges: -k=0 -DO n=1,myDim_edge2D - if((edge_up_dn_tri(1,n).ne.0).and.(edge_up_dn_tri(2,n).ne.0)) k=k+1 -END DO +!k=0 +!DO n=1, myDim_edge2D +! if((twork%edge_up_dn_tri(1,n).ne.0).and.(twork%edge_up_dn_tri(2,n).ne.0)) k=k+1 +!END DO +!$OMP PARALLEL DO +DO n=1, myDim_edge2D + twork%edge_up_dn_grad(:, :, n)=0.0_WP +END DO +!$OMP END PARALLEL DO deallocate(e_nodes, coord_elem) - - -edge_up_dn_grad=0.0_WP - end SUBROUTINE find_up_downwind_triangles ! ! !_______________________________________________________________________________ -SUBROUTINE fill_up_dn_grad(mesh) - +SUBROUTINE fill_up_dn_grad(twork, partit, mesh) ! ttx, tty elemental gradient of tracer USE o_PARAM USE MOD_MESH -USE O_MESH +USE MOD_PARTIT +USE MOD_PARSUP +USE MOD_TRACER USE o_ARRAYS -USE g_PARSUP IMPLICIT NONE -integer :: n, nz, elem, k, edge, ednodes(2), nzmin, nzmax +integer :: edge, n, nz, elem, k, ednodes(2), nzmin, nzmax real(kind=WP) :: tvol, tx, ty -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - +type(t_mesh), intent(in), target :: mesh +type(t_partit), intent(inout), target :: partit +type(t_tracer_work), intent(inout), target :: twork +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" !___________________________________________________________________________ ! 
loop over edge segments - DO edge=1,myDim_edge2D +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(edge, n, nz, elem, k, ednodes, nzmin, nzmax, tvol, tx, ty) +!$OMP DO + DO edge=1, myDim_edge2D ednodes=edges(:,edge) !_______________________________________________________________________ ! case when edge has upwind and downwind triangle on the surface - if((edge_up_dn_tri(1,edge).ne.0.0_WP).and.(edge_up_dn_tri(2,edge).ne.0.0_WP)) then + if((twork%edge_up_dn_tri(1,edge).ne.0.0_WP).and.(twork%edge_up_dn_tri(2,edge).ne.0.0_WP)) then nzmin = maxval(ulevels_nod2D_max(ednodes)) nzmax = minval(nlevels_nod2D_min(ednodes)) @@ -324,8 +400,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(1,nz,edge)=tx/tvol - edge_up_dn_grad(3,nz,edge)=ty/tvol + twork%edge_up_dn_grad(1,nz,edge)=tx/tvol + twork%edge_up_dn_grad(3,nz,edge)=ty/tvol END DO !___________________________________________________________________ @@ -345,8 +421,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(2,nz,edge)=tx/tvol - edge_up_dn_grad(4,nz,edge)=ty/tvol + twork%edge_up_dn_grad(2,nz,edge)=tx/tvol + twork%edge_up_dn_grad(4,nz,edge)=ty/tvol END DO !___________________________________________________________________ @@ -354,9 +430,9 @@ SUBROUTINE fill_up_dn_grad(mesh) !!PS DO nz=1, minval(nlevels_nod2D_min(ednodes))-1 DO nz=nzmin, nzmax-1 ! tracer gradx for upwind and downwind tri - edge_up_dn_grad(1:2,nz,edge)=tr_xy(1,nz,edge_up_dn_tri(:,edge)) + twork%edge_up_dn_grad(1:2,nz,edge)=tr_xy(1,nz,twork%edge_up_dn_tri(:,edge)) ! 
tracer grady for upwind and downwind tri - edge_up_dn_grad(3:4,nz,edge)=tr_xy(2,nz,edge_up_dn_tri(:,edge)) + twork%edge_up_dn_grad(3:4,nz,edge)=tr_xy(2,nz,twork%edge_up_dn_tri(:,edge)) END DO !___________________________________________________________________ @@ -377,8 +453,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(1,nz,edge)=tx/tvol - edge_up_dn_grad(3,nz,edge)=ty/tvol + twork%edge_up_dn_grad(1,nz,edge)=tx/tvol + twork%edge_up_dn_grad(3,nz,edge)=ty/tvol END DO !___________________________________________________________________ ! loop over not shared depth levels of edge node 2 (ednodes(2)) @@ -398,8 +474,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(2,nz,edge)=tx/tvol - edge_up_dn_grad(4,nz,edge)=ty/tvol + twork%edge_up_dn_grad(2,nz,edge)=tx/tvol + twork%edge_up_dn_grad(4,nz,edge)=ty/tvol END DO !_______________________________________________________________________ ! 
case when edge either upwind or downwind triangle on the surface @@ -421,8 +497,8 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(1,nz,edge)=tx/tvol - edge_up_dn_grad(3,nz,edge)=ty/tvol + twork%edge_up_dn_grad(1,nz,edge)=tx/tvol + twork%edge_up_dn_grad(3,nz,edge)=ty/tvol END DO nzmin = ulevels_nod2D(ednodes(2)) nzmax = nlevels_nod2D(ednodes(2)) @@ -439,9 +515,11 @@ SUBROUTINE fill_up_dn_grad(mesh) tx=tx+tr_xy(1,nz,elem)*elem_area(elem) ty=ty+tr_xy(2,nz,elem)*elem_area(elem) END DO - edge_up_dn_grad(2,nz,edge)=tx/tvol - edge_up_dn_grad(4,nz,edge)=ty/tvol + twork%edge_up_dn_grad(2,nz,edge)=tx/tvol + twork%edge_up_dn_grad(4,nz,edge)=ty/tvol END DO end if - END DO + END DO +!$OMP END DO +!$OMP END PARALLEL END SUBROUTINE fill_up_dn_grad diff --git a/src/oce_setup_step.F90 b/src/oce_setup_step.F90 index 0d9dc083b..e892fec38 100755 --- a/src/oce_setup_step.F90 +++ b/src/oce_setup_step.F90 @@ -1,40 +1,108 @@ -module array_setup_interface - interface - subroutine array_setup(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module module oce_initial_state_interface - interface - subroutine oce_initial_state(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface + interface + subroutine oce_initial_state(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in) , target :: mesh + end subroutine + end interface +end module + +module tracer_init_interface + interface + subroutine tracer_init(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in) , target :: mesh + end 
subroutine + end interface +end module + +module dynamics_init_interface + interface + subroutine dynamics_init(dynamics, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface +end module + +module ocean_setup_interface + interface + subroutine ocean_setup(dynamics, tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout) , target :: mesh + end subroutine + end interface +end module + +module before_oce_step_interface + interface + subroutine before_oce_step(dynamics, tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use mod_tracer + use MOD_DYN + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + end subroutine + end interface end module ! ! 
!_______________________________________________________________________________ -subroutine ocean_setup(mesh) -USE MOD_MESH -USE o_PARAM -USE g_PARSUP -USE o_ARRAYS -USE g_config -USE g_forcing_param, only: use_virt_salt -use g_cvmix_tke -use g_cvmix_idemix -use g_cvmix_pp -use g_cvmix_kpp -use g_cvmix_tidal -use Toy_Channel_Soufflet -use array_setup_interface -use oce_initial_state_interface -use oce_adv_tra_fct_interfaces -IMPLICIT NONE -type(t_mesh), intent(inout) , target :: mesh +subroutine ocean_setup(dynamics, tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE MOD_DYN + USE o_PARAM + USE o_ARRAYS + USE g_config + USE g_forcing_param, only: use_virt_salt + use g_cvmix_tke + use g_cvmix_idemix + use g_cvmix_pp + use g_cvmix_kpp + use g_cvmix_tidal + use g_backscatter + use Toy_Channel_Soufflet + use oce_initial_state_interface + use oce_adv_tra_fct_interfaces + use init_ale_interface + use init_thickness_ale_interface + IMPLICIT NONE + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(inout), target :: mesh + !___________________________________________________________________________ + integer :: n + !___setup virt_salt_flux____________________________________________________ ! if the ale thinkness remain unchanged (like in 'linfs' case) the vitrual ! salinity flux need to be used @@ -49,17 +117,19 @@ subroutine ocean_setup(mesh) use_virt_salt=.true. is_nonlinfs = 0.0_WP end if - call array_setup(mesh) - + !___________________________________________________________________________ ! 
initialize arrays for ALE - if (mype==0) then + if (partit%mype==0) then write(*,*) '____________________________________________________________' write(*,*) ' --> initialise ALE arrays + sparse SSH stiff matrix' write(*,*) end if - call init_ale(mesh) - call init_stiff_mat_ale(mesh) !!PS test + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_ale'//achar(27)//'[0m' + call init_ale(dynamics, partit, mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_stiff_mat_ale'//achar(27)//'[0m' + call init_stiff_mat_ale(partit, mesh) !!PS test !___________________________________________________________________________ ! initialize arrays from cvmix library for CVMIX_KPP, CVMIX_PP, CVMIX_TKE, @@ -81,39 +151,45 @@ subroutine ocean_setup(mesh) case ('cvmix_TKE+cvmix_IDEMIX') ; mix_scheme_nmb = 56 case default stop "!not existing mixing scheme!" - call par_ex + call par_ex(partit%MPI_COMM_FESOM, partit%mype) end select ! initialise fesom1.4 like KPP if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then - call oce_mixing_kpp_init(mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call oce_mixing_kpp_init'//achar(27)//'[0m' + call oce_mixing_kpp_init(partit, mesh) ! initialise fesom1.4 like PP elseif (mix_scheme_nmb==2 .or. mix_scheme_nmb==27) then ! initialise cvmix_KPP elseif (mix_scheme_nmb==3 .or. mix_scheme_nmb==37) then - call init_cvmix_kpp(mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_kpp'//achar(27)//'[0m' + call init_cvmix_kpp(partit, mesh) ! initialise cvmix_PP elseif (mix_scheme_nmb==4 .or. mix_scheme_nmb==47) then - call init_cvmix_pp(mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_pp'//achar(27)//'[0m' + call init_cvmix_pp(partit, mesh) ! initialise cvmix_TKE elseif (mix_scheme_nmb==5 .or. mix_scheme_nmb==56) then - call init_cvmix_tke(mesh) + if (flag_debug .and. 
partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_tke'//achar(27)//'[0m' + call init_cvmix_tke(partit, mesh) endif ! initialise additional mixing cvmix_IDEMIX --> only in combination with ! cvmix_TKE+cvmix_IDEMIX or stand alone for debbuging as cvmix_TKE if (mod(mix_scheme_nmb,10)==6) then - call init_cvmix_idemix(mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_idemix'//achar(27)//'[0m' + call init_cvmix_idemix(partit, mesh) ! initialise additional mixing cvmix_TIDAL --> only in combination with ! KPP+cvmix_TIDAL, PP+cvmix_TIDAL, cvmix_KPP+cvmix_TIDAL, cvmix_PP+cvmix_TIDAL ! or stand alone for debbuging as cvmix_TIDAL elseif (mod(mix_scheme_nmb,10)==7) then - call init_cvmix_tidal(mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_cvmix_tidal'//achar(27)//'[0m' + call init_cvmix_tidal(partit, mesh) end if !___________________________________________________________________________ @@ -124,18 +200,18 @@ subroutine ocean_setup(mesh) ! compute for all cavity points (ulevels_nod2D>1), which is the closest ! cavity line point to that point --> use their coordinates and depth --> ! use for extrapolation of init state under cavity - if (use_cavity) call compute_nrst_pnt2cavline(mesh) + if (use_cavity) call compute_nrst_pnt2cavline(partit, mesh) - if (use_density_ref) call init_ref_density(mesh) + if (use_density_ref) call init_ref_density(partit, mesh) !___________________________________________________________________________ - if(mype==0) write(*,*) 'Arrays are set' + if(partit%mype==0) write(*,*) 'Arrays are set' !if(open_boundary) call set_open_boundary !TODO - - call oce_adv_tra_fct_init(mesh) - call muscl_adv_init(mesh) !!PS test + if (flag_debug .and. 
partit%mype==0) print *, achar(27)//'[36m'//' --> call oce_adv_tra_fct_init'//achar(27)//'[0m' + call oce_adv_tra_fct_init(tracers%work, partit, mesh) + call muscl_adv_init(tracers%work, partit, mesh) !!PS test !===================== ! Initialize fields ! A user-defined routine has to be called here! @@ -143,249 +219,418 @@ subroutine ocean_setup(mesh) if (toy_ocean) then SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call toy_channel'//achar(27)//'[0m' if (mod(mstep, soufflet_forc_update)==0) then - call initial_state_soufflet(mesh) - call compute_zonal_mean_ini(mesh) - call compute_zonal_mean(mesh) + call initial_state_soufflet(dynamics, tracers, partit, mesh) + call compute_zonal_mean_ini(partit, mesh) + call compute_zonal_mean(dynamics, tracers, partit, mesh) end if END SELECT else - call oce_initial_state(mesh) ! Use it if not running tests + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call oce_initial_state'//achar(27)//'[0m' + call oce_initial_state(tracers, partit, mesh) ! Use it if not running tests end if - if (.not.r_restart) tr_arr_old=tr_arr + if (.not.r_restart) then + do n=1, tracers%num_tracers + tracers%data(n)%valuesAB=tracers%data(n)%values + end do + end if !___________________________________________________________________________ ! first time fill up array for hnode & helem - if (mype==0) then + if (partit%mype==0) then write(*,*) '____________________________________________________________' write(*,*) ' --> call init_thickness_ale' write(*,*) end if - call init_thickness_ale(mesh) + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[36m'//' --> call init_thickness_ale'//achar(27)//'[0m' + call init_thickness_ale(dynamics, partit, mesh) + + !___________________________________________________________________________ + ! 
initialise arrays that are needed for backscatter_coef + if(dynamics%opt_visc==8) call init_backscatter(partit, mesh) + !___________________________________________________________________________ - if(mype==0) write(*,*) 'Initial state' - if (w_split .and. mype==0) then + if(partit%mype==0) write(*,*) 'Initial state' + if (dynamics%use_wsplit .and. partit%mype==0) then write(*,*) '******************************************************************************' write(*,*) 'vertical velocity will be split onto explicit and implicit constitutes;' - write(*,*) 'maximum allowed CDF on explicit W is set to: ', w_max_cfl + write(*,*) 'maximum allowed CDF on explicit W is set to: ', dynamics%wsplit_maxcfl write(*,*) '******************************************************************************' end if end subroutine ocean_setup ! ! !_______________________________________________________________________________ -SUBROUTINE array_setup(mesh) -USE MOD_MESH -USE o_ARRAYS -USE o_PARAM -USE g_PARSUP -use g_comm_auto -use g_config -use g_forcing_arrays -use o_mixing_kpp_mod ! KPP -USE g_forcing_param, only: use_virt_salt -use diagnostics, only: ldiag_dMOC, ldiag_DVD -IMPLICIT NONE -integer :: elem_size, node_size -integer :: n -type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - - -elem_size=myDim_elem2D+eDim_elem2D -node_size=myDim_nod2D+eDim_nod2D - - -! ================ -! Velocities -! ================ -!allocate(stress_diag(2, elem_size))!delete me -allocate(UV(2, nl-1, elem_size)) -allocate(UV_rhs(2,nl-1, elem_size)) -allocate(UV_rhsAB(2,nl-1, elem_size)) -allocate(Visc(nl-1, elem_size)) -! ================ -! elevation and its rhs -! ================ -allocate(eta_n(node_size), d_eta(node_size)) -allocate(ssh_rhs(node_size)) -! ================ -! Monin-Obukhov -! ================ -if (use_ice .and. use_momix) allocate(mo(nl,node_size),mixlength(node_size)) -if (use_ice .and. use_momix) mixlength=0. -! ================ -! 
Vertical velocity and pressure -! ================ -allocate(Wvel(nl, node_size), hpressure(nl,node_size)) -allocate(Wvel_e(nl, node_size), Wvel_i(nl, node_size)) -allocate(CFL_z(nl, node_size)) ! vertical CFL criteria -! ================ -! Temperature and salinity -! ================ -allocate(T_rhs(nl-1, node_size)) -allocate(S_rhs(nl-1, node_size)) -allocate(tr_arr(nl-1,node_size,num_tracers),tr_arr_old(nl-1,node_size,num_tracers)) -allocate(del_ttf(nl-1,node_size)) -allocate(del_ttf_advhoriz(nl-1,node_size),del_ttf_advvert(nl-1,node_size)) -del_ttf = 0.0_WP -del_ttf_advhoriz = 0.0_WP -del_ttf_advvert = 0.0_WP -!!PS allocate(del_ttf_diff(nl-1,node_size)) -if (ldiag_DVD) then - allocate(tr_dvd_horiz(nl-1,node_size,2),tr_dvd_vert(nl-1,node_size,2)) - tr_dvd_horiz = 0.0_WP - tr_dvd_vert = 0.0_WP -end if - -allocate(bvfreq(nl,node_size),mixlay_dep(node_size),bv_ref(node_size)) -! ================ -! Ocean forcing arrays -! ================ -allocate(Tclim(nl-1,node_size), Sclim(nl-1, node_size)) -allocate(stress_surf(2,myDim_elem2D)) !!! Attention, it is shorter !!! -allocate(stress_atmoce_x(node_size), stress_atmoce_y(node_size)) -allocate(relax2clim(node_size)) -allocate(heat_flux(node_size), Tsurf(node_size)) -allocate(water_flux(node_size), Ssurf(node_size)) -allocate(relax_salt(node_size)) -allocate(virtual_salt(node_size)) - -allocate(heat_flux_in(node_size)) -allocate(real_salt_flux(node_size)) !PS -! ================= -! Arrays used to organize surface forcing -! ================= -allocate(Tsurf_t(node_size,2), Ssurf_t(node_size,2)) -allocate(tau_x_t(node_size,2), tau_y_t(node_size,2)) - -! ================= -! All auxiliary arrays -! ================= - -!if(mom_adv==3) then -allocate(vorticity(nl-1,node_size)) -vorticity=0.0_WP -!end if - -! ================= -! Visc and Diff coefs -! ================= - -allocate(Av(nl,elem_size), Kv(nl,node_size)) - -Av=0.0_WP -Kv=0.0_WP -if (mix_scheme_nmb==1 .or. 
mix_scheme_nmb==17) then - allocate(Kv_double(nl,node_size,num_tracers)) - Kv_double=0.0_WP - !!PS call oce_mixing_kpp_init ! Setup constants, allocate arrays and construct look up table -end if - -! ================= -! Backscatter arrays -! ================= - -if(visc_option==8) then - -allocate(uke(nl-1,elem_size)) ! Unresolved kinetic energy for backscatter coefficient -allocate(v_back(nl-1,elem_size)) ! Backscatter viscosity -allocate(uke_dis(nl-1,elem_size), uke_back(nl-1,elem_size)) -allocate(uke_dif(nl-1,elem_size)) -allocate(uke_rhs(nl-1,elem_size), uke_rhs_old(nl-1,elem_size)) -allocate(UV_dis_tend(2,nl-1,elem_size), UV_back_tend(2,nl-1,elem_size)) -allocate(UV_total_tend(2,nl-1,elem_size)) - -uke=0.0_8 -v_back=0.0_8 -uke_dis=0.0_8 -uke_dif=0.0_8 -uke_back=0.0_8 -uke_rhs=0.0_8 -uke_rhs_old=0.0_8 -UV_dis_tend=0.0_8 -UV_back_tend=0.0_8 -UV_total_tend=0.0_8 -end if - -!Velocities at nodes -allocate(Unode(2,nl-1,node_size)) - -! tracer gradients & RHS -allocate(ttrhs(nl-1,node_size)) -allocate(tr_xy(2,nl-1,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) -allocate(tr_z(nl,myDim_nod2D+eDim_nod2D)) - -! neutral slope etc. to be used in Redi formulation -allocate(neutral_slope(3, nl-1, node_size)) -allocate(slope_tapered(3, nl-1, node_size)) -allocate(Ki(nl-1, node_size)) - -do n=1, node_size -! Ki(n)=K_hor*area(1,n)/scale_area - Ki(:,n)=K_hor*(mesh_resolution(n)/100000.0_WP)**2 -end do -call exchange_nod(Ki) - -neutral_slope=0.0_WP -slope_tapered=0.0_WP - -allocate(MLD1(node_size), MLD2(node_size), MLD1_ind(node_size), MLD2_ind(node_size)) -if (use_global_tides) then - allocate(ssh_gp(node_size)) - ssh_gp=0. -end if -! xy gradient of a neutral surface -allocate(sigma_xy(2, nl-1, node_size)) -sigma_xy=0.0_WP - -! 
alpha and beta in the EoS -allocate(sw_beta(nl-1, node_size), sw_alpha(nl-1, node_size)) -sw_beta=0.0_WP -sw_alpha=0.0_WP - -if (Fer_GM) then - allocate(fer_c(node_size),fer_scal(node_size), fer_gamma(2, nl, node_size), fer_K(nl, node_size)) - allocate(fer_wvel(nl, node_size), fer_UV(2, nl-1, elem_size)) - fer_gamma=0.0_WP - fer_uv=0.0_WP - fer_wvel=0.0_WP - fer_K=500._WP - fer_c=1._WP - fer_scal = 0.0_WP -end if - -if (SPP) then - allocate(ice_rejected_salt(node_size)) - ice_rejected_salt=0._WP -end if - -! ================= -! Initialize with zeros -! ================= - - UV=0.0_WP - UV_rhs=0.0_WP - UV_rhsAB=0.0_WP +SUBROUTINE tracer_init(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE DIAGNOSTICS, only: ldiag_DVD + USE g_ic3d + IMPLICIT NONE + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in) , target :: mesh + type(nml_tracer_list_type), target, allocatable :: nml_tracer_list(:) + !___________________________________________________________________________ + integer :: elem_size, node_size + integer, save :: nm_unit = 104 ! unit to open namelist file, skip 100-102 for cray + integer :: iost + integer :: n + !___________________________________________________________________________ + ! define tracer namelist parameter + integer :: num_tracers + logical :: i_vert_diff, smooth_bh_tra + real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra + namelist /tracer_listsize/ num_tracers + namelist /tracer_list / nml_tracer_list + namelist /tracer_general / smooth_bh_tra, gamma0_tra, gamma1_tra, gamma2_tra, i_vert_diff + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !___________________________________________________________________________ + ! 
OPEN and read namelist for I/O + open( unit=nm_unit, file='namelist.tra', form='formatted', access='sequential', status='old', iostat=iost ) + if (iost == 0) then + if (mype==0) WRITE(*,*) ' file : ', 'namelist.tra',' open ok' + else + if (mype==0) WRITE(*,*) 'ERROR: --> bad opening file : ', 'namelist.tra',' ; iostat=',iost + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + + READ(nm_unit, nml=tracer_listsize, iostat=iost) + allocate(nml_tracer_list(num_tracers)) + READ(nm_unit, nml=tracer_list, iostat=iost) + read(nm_unit, nml=tracer_init3d, iostat=iost) + READ(nm_unit, nml=tracer_general, iostat=iost) + close(nm_unit) + + do n=1, num_tracers + if (nml_tracer_list(n)%id==-1) then + if (mype==0) write(*,*) 'number of tracers will be changed from ', num_tracers, ' to ', n-1, '!' + num_tracers=n-1 + EXIT + end if + end do + + if (mype==0) write(*,*) 'total number of tracers is: ', num_tracers + + !___________________________________________________________________________ + ! define local vertice & elem array size + number of tracers + elem_size=myDim_elem2D+eDim_elem2D + node_size=myDim_nod2D+eDim_nod2D + tracers%num_tracers=num_tracers + + !___________________________________________________________________________ + ! allocate/initialise horizontal velocity arrays in derived type + ! Temperature (index=1), Salinity (index=2), etc. 
+ allocate(tracers%data(num_tracers)) + do n=1, tracers%num_tracers + allocate(tracers%data(n)%values (nl-1,node_size)) + allocate(tracers%data(n)%valuesAB(nl-1,node_size)) + tracers%data(n)%ID = nml_tracer_list(n)%id + tracers%data(n)%tra_adv_hor = TRIM(nml_tracer_list(n)%adv_hor) + tracers%data(n)%tra_adv_ver = TRIM(nml_tracer_list(n)%adv_ver) + tracers%data(n)%tra_adv_lim = TRIM(nml_tracer_list(n)%adv_lim) + tracers%data(n)%tra_adv_ph = nml_tracer_list(n)%adv_ph + tracers%data(n)%tra_adv_pv = nml_tracer_list(n)%adv_pv + tracers%data(n)%smooth_bh_tra = smooth_bh_tra + tracers%data(n)%gamma0_tra = gamma0_tra + tracers%data(n)%gamma1_tra = gamma1_tra + tracers%data(n)%gamma2_tra = gamma2_tra + tracers%data(n)%values = 0. + tracers%data(n)%valuesAB = 0. + tracers%data(n)%i_vert_diff = i_vert_diff + end do + allocate(tracers%work%del_ttf(nl-1,node_size)) + allocate(tracers%work%del_ttf_advhoriz(nl-1,node_size),tracers%work%del_ttf_advvert(nl-1,node_size)) + tracers%work%del_ttf = 0.0_WP + tracers%work%del_ttf_advhoriz = 0.0_WP + tracers%work%del_ttf_advvert = 0.0_WP + if (ldiag_DVD) then + allocate(tracers%work%tr_dvd_horiz(nl-1,node_size,2),tracers%work%tr_dvd_vert(nl-1,node_size,2)) + tracers%work%tr_dvd_horiz = 0.0_WP + tracers%work%tr_dvd_vert = 0.0_WP + end if +END SUBROUTINE tracer_init +! +! +!_______________________________________________________________________________ +SUBROUTINE dynamics_init(dynamics, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_DYN + USE o_param + IMPLICIT NONE + type(t_mesh) , intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_dyn) , intent(inout), target :: dynamics + !___________________________________________________________________________ + integer :: elem_size, node_size + integer, save :: nm_unit = 105 ! unit to open namelist file, skip 100-102 for cray + integer :: iost + !___________________________________________________________________________ + ! 
define dynamics namelist parameter + integer :: opt_visc + real(kind=WP) :: visc_gamma0, visc_gamma1, visc_gamma2 + real(kind=WP) :: visc_easybsreturn + logical :: use_ivertvisc + integer :: momadv_opt + logical :: use_freeslip + logical :: use_wsplit + real(kind=WP) :: wsplit_maxcfl + namelist /dynamics_visc / opt_visc, visc_gamma0, visc_gamma1, visc_gamma2, & + use_ivertvisc, visc_easybsreturn + namelist /dynamics_general/ momadv_opt, use_freeslip, use_wsplit, wsplit_maxcfl + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !___________________________________________________________________________ + ! open and read namelist for I/O + open(unit=nm_unit, file='namelist.dyn', form='formatted', access='sequential', status='old', iostat=iost ) + if (iost == 0) then + if (mype==0) write(*,*) ' file : ', 'namelist.dyn',' open ok' + else + if (mype==0) write(*,*) 'ERROR: --> bad opening file : ', 'namelist.dyn',' ; iostat=',iost + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + end if + read(nm_unit, nml=dynamics_visc, iostat=iost) + read(nm_unit, nml=dynamics_general, iostat=iost) + close(nm_unit) + + !___________________________________________________________________________ + ! set parameters in derived type + dynamics%opt_visc = opt_visc + dynamics%visc_gamma0 = visc_gamma0 + dynamics%visc_gamma1 = visc_gamma1 + dynamics%visc_gamma2 = visc_gamma2 + dynamics%visc_easybsreturn = visc_easybsreturn + dynamics%use_ivertvisc = use_ivertvisc + dynamics%momadv_opt = momadv_opt + dynamics%use_freeslip = use_freeslip + dynamics%use_wsplit = use_wsplit + dynamics%wsplit_maxcfl = wsplit_maxcfl + + !___________________________________________________________________________ + ! 
define local vertice & elem array size + elem_size=myDim_elem2D+eDim_elem2D + node_size=myDim_nod2D+eDim_nod2D + + !___________________________________________________________________________ + ! allocate/initialise horizontal velocity arrays in derived type + allocate(dynamics%uv( 2, nl-1, elem_size)) + allocate(dynamics%uv_rhs( 2, nl-1, elem_size)) + allocate(dynamics%uv_rhsAB( 2, nl-1, elem_size)) + allocate(dynamics%uvnode( 2, nl-1, node_size)) + dynamics%uv = 0.0_WP + dynamics%uv_rhs = 0.0_WP + dynamics%uv_rhsAB = 0.0_WP + dynamics%uvnode = 0.0_WP + if (Fer_GM) then + allocate(dynamics%fer_uv(2, nl-1, elem_size)) + dynamics%fer_uv = 0.0_WP + end if + + !___________________________________________________________________________ + ! allocate/initialise vertical velocity arrays in derived type + allocate(dynamics%w( nl, node_size)) + allocate(dynamics%w_e( nl, node_size)) + allocate(dynamics%w_i( nl, node_size)) + allocate(dynamics%cfl_z( nl, node_size)) + dynamics%w = 0.0_WP + dynamics%w_e = 0.0_WP + dynamics%w_i = 0.0_WP + dynamics%cfl_z = 0.0_WP + if (Fer_GM) then + allocate(dynamics%fer_w( nl, node_size)) + dynamics%fer_w = 0.0_WP + end if + + !___________________________________________________________________________ + ! allocate/initialise ssh arrays in derived type + allocate(dynamics%eta_n( node_size)) + allocate(dynamics%d_eta( node_size)) + allocate(dynamics%ssh_rhs( node_size)) + dynamics%eta_n = 0.0_WP + dynamics%d_eta = 0.0_WP + dynamics%ssh_rhs = 0.0_WP + !!PS allocate(dynamics%ssh_rhs_old(node_size)) + !!PS dynamics%ssh_rhs_old= 0.0_WP + + !___________________________________________________________________________ + ! 
inititalise working arrays + allocate(dynamics%work%uvnode_rhs(2, nl-1, node_size)) + allocate(dynamics%work%u_c(nl-1, elem_size)) + allocate(dynamics%work%v_c(nl-1, elem_size)) + dynamics%work%uvnode_rhs = 0.0_WP + dynamics%work%u_c = 0.0_WP + dynamics%work%v_c = 0.0_WP + if (dynamics%opt_visc==5) then + allocate(dynamics%work%u_b(nl-1, elem_size)) + allocate(dynamics%work%v_b(nl-1, elem_size)) + dynamics%work%u_b = 0.0_WP + dynamics%work%v_b = 0.0_WP + end if +END SUBROUTINE dynamics_init +! ! - eta_n=0.0_WP - d_eta=0.0_WP - ssh_rhs=0.0_WP - Wvel=0.0_WP - Wvel_e =0.0_WP - Wvel_i =0.0_WP - CFL_z =0.0_WP +!_______________________________________________________________________________ +SUBROUTINE arrays_init(num_tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE o_ARRAYS + USE o_PARAM + use g_comm_auto + use g_config + use g_forcing_arrays + use o_mixing_kpp_mod ! KPP + USE g_forcing_param, only: use_virt_salt + use diagnostics, only: ldiag_dMOC, ldiag_DVD + IMPLICIT NONE + integer, intent(in) :: num_tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + !___________________________________________________________________________ + integer :: elem_size, node_size + integer :: n + !___________________________________________________________________________ + ! define dynamics namelist parameter +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !___________________________________________________________________________ + elem_size=myDim_elem2D+eDim_elem2D + node_size=myDim_nod2D+eDim_nod2D + + ! ================ + ! Velocities + ! ================ + !allocate(stress_diag(2, elem_size))!delete me + !!PS allocate(Visc(nl-1, elem_size)) + ! ================ + ! elevation and its rhs + ! ================ + + ! ================ + ! Monin-Obukhov + ! ================ + if (use_ice .and. 
use_momix) allocate(mo(nl,node_size),mixlength(node_size)) + if (use_ice .and. use_momix) mixlength=0. + ! ================ + ! Vertical velocity and pressure + ! ================ + allocate( hpressure(nl,node_size)) + allocate(bvfreq(nl,node_size),mixlay_dep(node_size),bv_ref(node_size)) + ! ================ + ! Ocean forcing arrays + ! ================ + allocate(Tclim(nl-1,node_size), Sclim(nl-1, node_size)) + allocate(stress_surf(2,myDim_elem2D)) !!! Attention, it is shorter !!! + allocate(stress_node_surf(2,node_size)) + allocate(stress_atmoce_x(node_size), stress_atmoce_y(node_size)) + allocate(relax2clim(node_size)) + allocate(heat_flux(node_size), Tsurf(node_size)) + allocate(water_flux(node_size), Ssurf(node_size)) + allocate(relax_salt(node_size)) + allocate(virtual_salt(node_size)) + + allocate(heat_flux_in(node_size)) + allocate(real_salt_flux(node_size)) !PS + ! ================= + ! Arrays used to organize surface forcing + ! ================= + allocate(Tsurf_t(node_size,2), Ssurf_t(node_size,2)) + allocate(tau_x_t(node_size,2), tau_y_t(node_size,2)) + + + ! ================= + ! Visc and Diff coefs + ! ================= + + allocate(Av(nl,elem_size), Kv(nl,node_size)) + + Av=0.0_WP + Kv=0.0_WP + if (mix_scheme_nmb==1 .or. mix_scheme_nmb==17) then + allocate(Kv_double(nl,node_size, num_tracers)) + Kv_double=0.0_WP + !!PS call oce_mixing_kpp_init ! Setup constants, allocate arrays and construct look up table + end if + + ! tracer gradients & RHS + allocate(tr_xy(2,nl-1,myDim_elem2D+eDim_elem2D+eXDim_elem2D)) + allocate(tr_z(nl,myDim_nod2D+eDim_nod2D)) + + ! neutral slope etc. to be used in Redi formulation + allocate(neutral_slope(3, nl-1, node_size)) + allocate(slope_tapered(3, nl-1, node_size)) + allocate(Ki(nl-1, node_size)) + + do n=1, node_size + ! 
Ki(n)=K_hor*area(1,n)/scale_area + Ki(:,n)=K_hor*(mesh_resolution(n)/100000.0_WP)**2 + end do + call exchange_nod(Ki, partit) + + neutral_slope=0.0_WP + slope_tapered=0.0_WP + + allocate(MLD1(node_size), MLD2(node_size), MLD1_ind(node_size), MLD2_ind(node_size)) + if (use_global_tides) then + allocate(ssh_gp(node_size)) + ssh_gp=0. + end if + ! xy gradient of a neutral surface + allocate(sigma_xy(2, nl-1, node_size)) + sigma_xy=0.0_WP + + ! alpha and beta in the EoS + allocate(sw_beta(nl-1, node_size), sw_alpha(nl-1, node_size)) + allocate(dens_flux(node_size)) + sw_beta =0.0_WP + sw_alpha =0.0_WP + dens_flux=0.0_WP + + if (Fer_GM) then + allocate(fer_c(node_size),fer_scal(node_size), fer_gamma(2, nl, node_size), fer_K(nl, node_size)) + fer_gamma=0.0_WP + fer_K=500._WP + fer_c=1._WP + fer_scal = 0.0_WP + end if + + if (SPP) then + allocate(ice_rejected_salt(node_size)) + ice_rejected_salt=0._WP + end if + + ! ================= + ! Initialize with zeros + ! ================= hpressure=0.0_WP ! - T_rhs=0.0_WP heat_flux=0.0_WP heat_flux_in=0.0_WP Tsurf=0.0_WP - S_rhs=0.0_WP water_flux=0.0_WP relax_salt=0.0_WP virtual_salt=0.0_WP @@ -393,12 +638,12 @@ SUBROUTINE array_setup(mesh) Ssurf=0.0_WP real_salt_flux=0.0_WP - stress_atmoce_x=0. - stress_atmoce_y=0. - tr_arr=0.0_WP - tr_arr_old=0.0_WP - + stress_surf =0.0_WP + stress_node_surf =0.0_WP + stress_atmoce_x =0.0_WP + stress_atmoce_y =0.0_WP + bvfreq=0.0_WP mixlay_dep=0.0_WP bv_ref=0.0_WP @@ -439,188 +684,208 @@ SUBROUTINE array_setup(mesh) !!PS dum_3d_n = 0.0_WP !!PS dum_2d_e = 0.0_WP !!PS dum_3d_e = 0.0_WP -END SUBROUTINE array_setup +END SUBROUTINE arrays_init ! ! !_______________________________________________________________________________ ! Here the 3D tracers will be initialized. Initialization strategy depends on a tracer ID. ! ID = 0 and 1 are reserved for temperature and salinity -SUBROUTINE oce_initial_state(mesh) -USE MOD_MESH -USE o_ARRAYS -USE g_PARSUP -USE g_config -USE g_ic3d - ! - ! 
reads the initial state or the restart file for the ocean - ! - implicit none - integer :: i, k, counter, rcounter3, id - character(len=10) :: i_string, id_string - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: loc, max_temp, min_temp, max_salt, min_salt - -#include "associate_mesh.h" - - if (mype==0) write(*,*) num_tracers, ' tracers will be used in FESOM' - if (mype==0) write(*,*) 'tracer IDs are: ', tracer_ID(1:num_tracers) - ! - ! read ocean state - ! this must be always done! First two tracers with IDs 0 and 1 are the temperature and salinity. - if(mype==0) write(*,*) 'read Temperatur climatology from:', trim(filelist(1)) - if(mype==0) write(*,*) 'read Salt climatology from:', trim(filelist(2)) - call do_ic3d(mesh) - - Tclim=tr_arr(:,:,1) - Sclim=tr_arr(:,:,2) - Tsurf=tr_arr(1,:,1) - Ssurf=tr_arr(1,:,2) - relax2clim=0.0_WP - - ! count the passive tracers which require 3D source (ptracers_restore_total) - ptracers_restore_total=0 - DO i=3, num_tracers - id=tracer_ID(i) - SELECT CASE (id) - CASE (301) - ptracers_restore_total=ptracers_restore_total+1 - CASE (302) - ptracers_restore_total=ptracers_restore_total+1 - CASE (303) - ptracers_restore_total=ptracers_restore_total+1 - - END SELECT - END DO - allocate(ptracers_restore(ptracers_restore_total)) - - rcounter3=0 ! counter for tracers with 3D source - DO i=3, num_tracers - id=tracer_ID(i) - SELECT CASE (id) - CASE (101) ! initialize tracer ID=101 - tr_arr(:,:,i)=0.0_WP - if (mype==0) then - write (i_string, "(I3)") i - write (id_string, "(I3)") id - write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) - end if - CASE (301) !Fram Strait 3d restored passive tracer - tr_arr(:,:,i)=0.0_WP - rcounter3 =rcounter3+1 - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>77.5*rad) .and. (geo_coord_nod2D(2,k)<78.*rad))& - .and.((geo_coord_nod2D(1,k)>0. *rad) .and. 
(geo_coord_nod2D(1,k)<10.*rad))) then - counter=counter+1 - end if - end do - allocate(ptracers_restore(rcounter3)%ind2(counter)) - ptracers_restore(rcounter3)%id =301 - ptracers_restore(rcounter3)%locid=i - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>77.5*rad) .and. (geo_coord_nod2D(2,k)<78.*rad))& - .and.((geo_coord_nod2D(1,k)>0. *rad) .and. (geo_coord_nod2D(1,k)<10.*rad))) then - counter=counter+1 - ptracers_restore(rcounter3)%ind2(counter)=k +! --> reads the initial state or the restart file for the ocean +SUBROUTINE oce_initial_state(tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE o_ARRAYS + USE g_config + USE g_ic3d + implicit none + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: i, k, counter, rcounter3, id + character(len=10) :: i_string, id_string + real(kind=WP) :: loc, max_temp, min_temp, max_salt, min_salt + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !___________________________________________________________________________ + if (mype==0) write(*,*) tracers%num_tracers, ' tracers will be used in FESOM' + if (mype==0) write(*,*) 'tracer IDs are: ', tracers%data(1:tracers%num_tracers)%ID + ! + ! read ocean state + ! this must be always done! First two tracers with IDs 0 and 1 are the temperature and salinity. 
+ if(mype==0) write(*,*) 'read Temperatur climatology from:', trim(filelist(1)) + if(mype==0) write(*,*) 'read Salt climatology from:', trim(filelist(2)) + call do_ic3d(tracers, partit, mesh) + + Tclim=tracers%data(1)%values + Sclim=tracers%data(2)%values + Tsurf=Tclim(1,:) + Ssurf=Sclim(1,:) + relax2clim=0.0_WP + + ! count the passive tracers which require 3D source (ptracers_restore_total) + ptracers_restore_total=0 + DO i=3, tracers%num_tracers + id=tracers%data(i)%ID + SELECT CASE (id) + CASE (301) + ptracers_restore_total=ptracers_restore_total+1 + CASE (302) + ptracers_restore_total=ptracers_restore_total+1 + CASE (303) + ptracers_restore_total=ptracers_restore_total+1 + + END SELECT + END DO + allocate(ptracers_restore(ptracers_restore_total)) + + rcounter3=0 ! counter for tracers with 3D source + DO i=3, tracers%num_tracers + id=tracers%data(i)%ID + SELECT CASE (id) + CASE (101) ! initialize tracer ID=101 + tracers%data(i)%values(:,:)=0.0_WP + if (mype==0) then + write (i_string, "(I3)") i + write (id_string, "(I3)") id + write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) end if - end do - tr_arr(:,ptracers_restore(rcounter3)%ind2,i)=1. - if (mype==0) then - write (i_string, "(I3)") i - write (id_string, "(I3)") id - write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) - end if - - CASE (302) !Bering Strait 3d restored passive tracer - tr_arr(:,:,i)=0. - rcounter3 =rcounter3+1 - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>65.6*rad) .and. (geo_coord_nod2D(2,k)<66.*rad))& - .and.((geo_coord_nod2D(1,k)>-172. *rad) .and. (geo_coord_nod2D(1,k)<-166.*rad))) then - counter=counter+1 + CASE (301) !Fram Strait 3d restored passive tracer + tracers%data(i)%values(:,:)=0.0_WP + rcounter3 =rcounter3+1 + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>77.5*rad) .and. (geo_coord_nod2D(2,k)<78.*rad))& + .and.((geo_coord_nod2D(1,k)>0. *rad) .and. 
(geo_coord_nod2D(1,k)<10.*rad))) then + counter=counter+1 + end if + end do + allocate(ptracers_restore(rcounter3)%ind2(counter)) + ptracers_restore(rcounter3)%id =301 + ptracers_restore(rcounter3)%locid=i + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>77.5*rad) .and. (geo_coord_nod2D(2,k)<78.*rad))& + .and.((geo_coord_nod2D(1,k)>0. *rad) .and. (geo_coord_nod2D(1,k)<10.*rad))) then + counter=counter+1 + ptracers_restore(rcounter3)%ind2(counter)=k + end if + end do + tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=1. + if (mype==0) then + write (i_string, "(I3)") i + write (id_string, "(I3)") id + write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) end if - end do - allocate(ptracers_restore(rcounter3)%ind2(counter)) - ptracers_restore(rcounter3)%id =302 - ptracers_restore(rcounter3)%locid=i - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>65.6*rad) .and. (geo_coord_nod2D(2,k)<66.*rad))& - .and.((geo_coord_nod2D(1,k)>-172. *rad) .and. (geo_coord_nod2D(1,k)<-166.*rad))) then - counter=counter+1 - ptracers_restore(rcounter3)%ind2(counter)=k + + CASE (302) !Bering Strait 3d restored passive tracer + tracers%data(i)%values(:,:)=0.0_WP + rcounter3 =rcounter3+1 + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>65.6*rad) .and. (geo_coord_nod2D(2,k)<66.*rad))& + .and.((geo_coord_nod2D(1,k)>-172. *rad) .and. (geo_coord_nod2D(1,k)<-166.*rad))) then + counter=counter+1 + end if + end do + allocate(ptracers_restore(rcounter3)%ind2(counter)) + ptracers_restore(rcounter3)%id =302 + ptracers_restore(rcounter3)%locid=i + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>65.6*rad) .and. (geo_coord_nod2D(2,k)<66.*rad))& + .and.((geo_coord_nod2D(1,k)>-172. *rad) .and. 
(geo_coord_nod2D(1,k)<-166.*rad))) then + counter=counter+1 + ptracers_restore(rcounter3)%ind2(counter)=k + end if + end do + tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP + if (mype==0) then + write (i_string, "(I3)") i + write (id_string, "(I3)") id + write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) end if - end do - tr_arr(:,ptracers_restore(rcounter3)%ind2,i)=1. - if (mype==0) then - write (i_string, "(I3)") i - write (id_string, "(I3)") id - write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) - end if - - CASE (303) !BSO 3d restored passive tracer - tr_arr(:,:,i)=0. - rcounter3 =rcounter3+1 - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>69.5*rad) .and. (geo_coord_nod2D(2,k)<74.5*rad))& - .and.((geo_coord_nod2D(1,k)>19. *rad) .and. (geo_coord_nod2D(1,k)<20.*rad))) then - counter=counter+1 + + CASE (303) !BSO 3d restored passive tracer + tracers%data(i)%values(:,:)=0.0_WP + rcounter3 =rcounter3+1 + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>69.5*rad) .and. (geo_coord_nod2D(2,k)<74.5*rad))& + .and.((geo_coord_nod2D(1,k)>19. *rad) .and. (geo_coord_nod2D(1,k)<20.*rad))) then + counter=counter+1 + end if + end do + allocate(ptracers_restore(rcounter3)%ind2(counter)) + ptracers_restore(rcounter3)%id =303 + ptracers_restore(rcounter3)%locid=i + counter=0 + do k=1, myDim_nod2D+eDim_nod2D + if (((geo_coord_nod2D(2,k)>69.5*rad) .and. (geo_coord_nod2D(2,k)<74.5*rad))& + .and.((geo_coord_nod2D(1,k)>19. *rad) .and. 
(geo_coord_nod2D(1,k)<20.*rad))) then + counter=counter+1 + ptracers_restore(rcounter3)%ind2(counter)=k + end if + end do + tracers%data(i)%values(:,ptracers_restore(rcounter3)%ind2)=0.0_WP + if (mype==0) then + write (i_string, "(I3)") i + write (id_string, "(I3)") id + write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) end if - end do - allocate(ptracers_restore(rcounter3)%ind2(counter)) - ptracers_restore(rcounter3)%id =303 - ptracers_restore(rcounter3)%locid=i - counter=0 - do k=1, myDim_nod2D+eDim_nod2D - if (((geo_coord_nod2D(2,k)>69.5*rad) .and. (geo_coord_nod2D(2,k)<74.5*rad))& - .and.((geo_coord_nod2D(1,k)>19. *rad) .and. (geo_coord_nod2D(1,k)<20.*rad))) then - counter=counter+1 - ptracers_restore(rcounter3)%ind2(counter)=k + CASE DEFAULT + if (mype==0) then + write (i_string, "(I3)") i + write (id_string, "(I3)") id + if (mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified for '//trim(i_string)//' th tracer!!!' + if (mype==0) write(*,*) 'the model will stop!' end if - end do - tr_arr(:,ptracers_restore(rcounter3)%ind2,i)=1. - if (mype==0) then - write (i_string, "(I3)") i - write (id_string, "(I3)") id - write(*,*) 'initializing '//trim(i_string)//'th tracer with ID='//trim(id_string) - end if - CASE DEFAULT - if (mype==0) then - write (i_string, "(I3)") i - write (id_string, "(I3)") id - if (mype==0) write(*,*) 'invalid ID '//trim(id_string)//' specified for '//trim(i_string)//' th tracer!!!' - if (mype==0) write(*,*) 'the model will stop!' - end if - call par_ex - stop - END SELECT - END DO + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + stop + END SELECT + END DO end subroutine oce_initial_state ! ! !========================================================================== ! 
Here we do things (if applicable) before the ocean timestep will be made -SUBROUTINE before_oce_step(mesh) +SUBROUTINE before_oce_step(dynamics, tracers, partit, mesh) USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE MOD_DYN USE o_ARRAYS - USE g_PARSUP USE g_config USE Toy_Channel_Soufflet implicit none + type(t_dyn) , intent(inout), target :: dynamics + type(t_tracer), intent(inout), target :: tracers + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ integer :: i, k, counter, rcounter3, id character(len=10) :: i_string, id_string - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" - + !___________________________________________________________________________ + ! pointer on necessary derived types +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + !___________________________________________________________________________ if (toy_ocean) then SELECT CASE (TRIM(which_toy)) CASE ("soufflet") !forcing update for soufflet testcase if (mod(mstep, soufflet_forc_update)==0) then - call compute_zonal_mean(mesh) + call compute_zonal_mean(dynamics, tracers, partit, mesh) end if END SELECT end if diff --git a/src/oce_shortwave_pene.F90 b/src/oce_shortwave_pene.F90 index 36d1b4b32..a40361c01 100644 --- a/src/oce_shortwave_pene.F90 +++ b/src/oce_shortwave_pene.F90 @@ -1,31 +1,50 @@ -subroutine cal_shortwave_rad(mesh) +subroutine cal_shortwave_rad(ice, partit, mesh) ! This routine is inherited from FESOM 1.4 and adopted appropreately. It calculates ! shortwave penetration into the ocean assuming the constant chlorophyll concentration. ! No penetration under the ice is applied. A decent way for ice region is to be discussed. ! This routine should be called after ice2oce coupling done if ice model is used. ! 
Ref.: Morel and Antoine 1994, Sweeney et al. 2005 - USE MOD_MESH - USE o_PARAM - USE o_ARRAYS - USE g_PARSUP - USE g_CONFIG - use g_forcing_arrays - use g_comm_auto - use i_param - use i_arrays - use i_therm_param - IMPLICIT NONE - - integer :: m, n2, n3, k, nzmax, nzmin - real(kind=WP):: swsurf, aux - real(kind=WP):: c, c2, c3, c4, c5 - real(kind=WP):: v1, v2, sc1, sc2 - type(t_mesh), intent(in) , target :: mesh - -#include "associate_mesh.h" + USE MOD_ICE + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + USE o_PARAM + USE o_ARRAYS + USE g_CONFIG + use g_forcing_arrays + use g_comm_auto + IMPLICIT NONE + type(t_ice) , intent(inout), target :: ice + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: m, n2, n3, k, nzmax, nzmin + real(kind=WP):: swsurf, aux + real(kind=WP):: c, c2, c3, c4, c5 + real(kind=WP):: v1, v2, sc1, sc2 + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:), pointer :: a_ice + real(kind=WP) , pointer :: albw +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + a_ice => ice%data(1)%values(:) + albw => ice%thermo%albw + + !___________________________________________________________________________ +!$OMP PARALLEL DEFAULT(SHARED) PRIVATE(m, n2, n3, k, nzmax, nzmin, swsurf, aux, c, c2, c3, c4, c5, v1, v2, sc1, sc2) +!$OMP DO + do n2=1, myDim_nod2D+eDim_nod2D + do k=1, nl + sw_3d(k, n2)=0.0_WP + end do + end do +!$OMP END DO - sw_3d=0.0_WP !_____________________________________________________________________________ +!$OMP DO do n2=1, myDim_nod2D+eDim_nod2D !__________________________________________________________________________ @@ -58,15 +77,12 @@ subroutine cal_shortwave_rad(mesh) v1=0.321_WP+v1 sc1=1.54_WP-0.197_WP*c+0.166_WP*c2-0.252_WP*c3-0.055_WP*c4+0.042_WP*c5 sc2=7.925_WP-6.644_WP*c+3.662_WP*c2-1.815_WP*c3-0.218_WP*c4+0.502_WP*c5 - ! convert from heat flux [W/m2] to temperature flux [K m/s] swsurf=swsurf/vcpw ! vis. sw. rad. in the colume nzmax=(nlevels_nod2D(n2)) nzmin=(ulevels_nod2D(n2)) - !!PS sw_3d(1, n2)=swsurf sw_3d(nzmin, n2)=swsurf - !!PS do k=2, nzmax do k=nzmin+1, nzmax aux=(v1*exp(zbar_3d_n(k,n2)/sc1)+v2*exp(zbar_3d_n(k,n2)/sc2)) sw_3d(k, n2)=swsurf*aux @@ -90,6 +106,6 @@ subroutine cal_shortwave_rad(mesh) !end if end do -!call par_ex -!stop +!$OMP END DO +!$OMP END PARALLEL end subroutine cal_shortwave_rad diff --git a/src/oce_spp.F90 b/src/oce_spp.F90 index 83cc17e36..9631f1a62 100644 --- a/src/oce_spp.F90 +++ b/src/oce_spp.F90 @@ -8,27 +8,37 @@ ! Ref: Duffy1997, Duffy1999, Nguyen2009 ! 
Originaly coded by Qiang Wang in FESOM 1.4 !-------------------------------------------------------- -subroutine cal_rejected_salt(mesh) -use g_parsup +subroutine cal_rejected_salt(ice, partit, mesh) use o_arrays +USE MOD_ICE use mod_mesh +USE MOD_PARTIT +USE MOD_PARSUP use g_comm_auto use o_tracers -use g_forcing_arrays, only: thdgr -use i_ARRAYS, only: S_oc_array -use i_therm_param, only: rhoice, rhowat, Sice use g_config, only: dt implicit none integer :: row real(kind=WP) :: aux -type(t_mesh), intent(in), target :: mesh - -#include "associate_mesh.h" +type(t_ice) , intent(in), target :: ice +type(t_mesh) , intent(in), target :: mesh +type(t_partit), intent(in), target :: partit +real(kind=WP), dimension(:) , pointer :: thdgr, S_oc_array +real(kind=WP) , pointer :: rhoice, rhowat, Sice +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" +thdgr => ice%thermo%thdgr +S_oc_array => ice%srfoce_salt +rhoice => ice%thermo%rhoice +rhowat => ice%thermo%rhowat +Sice => ice%thermo%Sice aux=rhoice/rhowat*dt do row=1, myDim_nod2d +eDim_nod2D! myDim is sufficient - if (thdgr(row)>0.0_WP) then + if (thdgr(row)>0.0_WP .and. ulevels_nod2D(row)==1) then ice_rejected_salt(row)= & (S_oc_array(row)-Sice)*thdgr(row)*aux*area(1, row) !unit: psu m3 @@ -41,12 +51,13 @@ end subroutine cal_rejected_salt ! !---------------------------------------------------------------------------- ! 
-subroutine app_rejected_salt(mesh) - use g_parsup +subroutine app_rejected_salt(ttf, partit, mesh) use o_arrays use mod_mesh - use g_comm_auto + USE MOD_PARTIT + USE MOD_PARSUP use o_tracers + use g_comm_auto implicit none integer :: row, k, nod, nup, nlo, kml, nzmin, nzmax @@ -58,20 +69,25 @@ subroutine app_rejected_salt(mesh) data n_distr /5/ data rho_cri /0.4_WP/ !kg/m3 !SH !Duffy1999 - type(t_mesh), intent(in) , target :: mesh + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(in), target :: partit + real(kind=WP), intent (inout) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) -#include "associate_mesh.h" +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" do row=1,myDim_nod2d+eDim_nod2D ! myDim is sufficient + if (ulevels_nod2D(row)>1) cycle if (ice_rejected_salt(row)<=0.0_WP) cycle ! do not parameterize brine rejection in regions with low salinity ! 1. it leads to further decrease of SSS ! 2. in case of non zero salinity of ice (the well accepted value is 5psu) the SSS might become negative nzmin = ulevels_nod2D(row) nzmax = nlevels_nod2D(row) - !!PS if (tr_arr(1,row,2) < 10.0_WP) cycle - if (tr_arr(nzmin,row,2) < 10.0_WP) cycle - if (geo_coord_nod2D(2,row)>0.0_WP) then !NH + if (ttf(nzmin,row) < 10.0_WP) cycle + !if (geo_coord_nod2D(2,row)>0.0_WP) then !NH kml=1 !!PS spar(1)=0.0_WP spar(nzmin)=0.0_WP @@ -79,26 +95,19 @@ subroutine app_rejected_salt(mesh) !!PS do k=1, nlevels_nod2D(row) do k=nzmin, nzmax drhodz=bvfreq(k, row)*density_0/g - if (drhodz>=drhodz_cri .or. Z_3d_n(k,row)<-50.0_WP) exit + if (drhodz>=drhodz_cri .or. 
Z_3d_n(k,row)<-80.0_WP) exit kml=kml+1 spar(k+1)=area(k+1,row)*hnode(k+1,row)*(Z_3d_n(1,row)-Z_3d_n(k+1,row))**n_distr end do - !!PS if (kml>1) then - !!PS tr_arr(1,row,2)=tr_arr(1,row,2)-ice_rejected_salt(row)/area(1,row)/hnode(1,row) - !!PS spar(2:kml)=spar(2:kml)/sum(spar(2:kml)) - !!PS do k=2,kml - !!PS tr_arr(k,row,2)=tr_arr(k,row,2)+ice_rejected_salt(row)*spar(k)/area(k,row)/hnode(k,row) - !!PS end do - !!PS endif if (kml>nzmin) then - tr_arr(nzmin,row,2)=tr_arr(nzmin,row,2)-ice_rejected_salt(row)/area(1,row)/hnode(1,row) + ttf(nzmin,row)=ttf(nzmin,row)-ice_rejected_salt(row)/areasvol(1,row)/hnode(1,row) spar(nzmin+1:kml)=spar(nzmin+1:kml)/sum(spar(nzmin+1:kml)) do k=nzmin+1,kml - tr_arr(k,row,2)=tr_arr(k,row,2)+ice_rejected_salt(row)*spar(k)/area(k,row)/hnode(k,row) + ttf(k,row)=ttf(k,row)+ice_rejected_salt(row)*spar(k)/areasvol(k,row)/hnode(k,row) end do endif - endif + !endif end do end subroutine app_rejected_salt diff --git a/src/oce_tracer_mod.F90 b/src/oce_tracer_mod.F90 index f8c06fa56..f374ca5d2 100755 --- a/src/oce_tracer_mod.F90 +++ b/src/oce_tracer_mod.F90 @@ -1,37 +1,84 @@ !============================================================================================ MODULE o_tracers USE MOD_MESH +USE MOD_TRACER +USE MOD_PARTIT +USE MOD_PARSUP IMPLICIT NONE -interface - subroutine tracer_gradient_z(ttf, mesh) - use g_PARSUP, only: myDim_nod2D, eDim_nod2D - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: ttf(mesh%nl-1,myDim_nod2D+eDim_nod2D) - end subroutine -end interface - CONTAINS ! ! 
-!======================================================================= -SUBROUTINE tracer_gradient_elements(ttf, mesh) - !computes elemental gradient of tracer +!=============================================================================== +SUBROUTINE init_tracers_AB(tr_num, tracers, partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + use g_config, only: flag_debug + use o_arrays + use g_comm_auto + IMPLICIT NONE + integer, intent(in) :: tr_num + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer :: n,nz +!$OMP PARALLEL DO + do n=1, partit%myDim_nod2D+partit%eDim_nod2D + ! del_ttf will contain all advection / diffusion contributions for this tracer. Set it to 0 at the beginning! + tracers%work%del_ttf (:, n) = 0.0_WP + tracers%work%del_ttf_advhoriz (:, n) = 0.0_WP + tracers%work%del_ttf_advvert (:, n) = 0.0_WP + ! AB interpolation + tracers%data(tr_num)%valuesAB(:, n) =-(0.5_WP+epsilon)*tracers%data(tr_num)%valuesAB(:, n)+(1.5_WP+epsilon)*tracers%data(tr_num)%values(:, n) + end do +!$OMP END PARALLEL DO + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' + call tracer_gradient_elements(tracers%data(tr_num)%valuesAB, partit, mesh) + call exchange_elem_begin(tr_xy, partit) + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' + call tracer_gradient_z(tracers%data(tr_num)%values, partit, mesh) !WHY NOT AB HERE? DSIDOREN! + call exchange_elem_end(partit) ! tr_xy used in fill_up_dn_grad +!$OMP BARRIER + + call exchange_nod_begin(tr_z, partit) ! not used in fill_up_dn_grad + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call fill_up_dn_grad'//achar(27)//'[0m' + call fill_up_dn_grad(tracers%work, partit, mesh) + call exchange_nod_end(partit) ! 
tr_z halos should have arrived by now. + + if (flag_debug .and. partit%mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' + call tracer_gradient_elements(tracers%data(tr_num)%values, partit, mesh) !redefine tr_arr to the current timestep + call exchange_elem(tr_xy, partit) +END SUBROUTINE init_tracers_AB +! +! +!======================================================================= +SUBROUTINE tracer_gradient_elements(ttf, partit, mesh) + !computes elemental gradient of tracer + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER USE o_PARAM USE o_ARRAYS - USE g_PARSUP IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: ttf(mesh%nl-1,myDim_nod2D+eDim_nod2D) - integer :: elem, elnodes(3) - integer :: n, nz, nzmin, nzmax - - -#include "associate_mesh.h" - + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + integer :: elem, elnodes(3) + integer :: nz, nzmin, nzmax + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(elem, elnodes, nz, nzmin, nzmax) DO elem=1, myDim_elem2D elnodes=elem2D_nodes(:,elem) nzmin = ulevels(elem) @@ -42,100 +89,32 @@ SUBROUTINE tracer_gradient_elements(ttf, mesh) tr_xy(2,nz, elem)=sum(gradient_sca(4:6,elem)*ttf(nz,elnodes)) END DO END DO +!$OMP END PARALLEL DO END SUBROUTINE tracer_gradient_elements ! ! 
!======================================================================================== -SUBROUTINE init_tracers_AB(tr_num, mesh) - use g_config, only: flag_debug - use g_parsup - use o_arrays - use g_comm_auto - use mod_mesh - - IMPLICIT NONE - integer :: tr_num,n,nz - type(t_mesh), intent(in) , target :: mesh - - !filling work arrays - del_ttf=0.0_WP - - !AB interpolation - tr_arr_old(:,:,tr_num)=-(0.5_WP+epsilon)*tr_arr_old(:,:,tr_num)+(1.5_WP+epsilon)*tr_arr(:,:,tr_num) - - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tr_arr_old(:,:,tr_num), mesh) - call exchange_elem_begin(tr_xy) - - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_z'//achar(27)//'[0m' - call tracer_gradient_z(tr_arr(:,:,tr_num), mesh) - call exchange_elem_end() ! tr_xy used in fill_up_dn_grad - call exchange_nod_begin(tr_z) ! not used in fill_up_dn_grad - - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call fill_up_dn_grad'//achar(27)//'[0m' - call fill_up_dn_grad(mesh) - call exchange_nod_end() ! tr_z halos should have arrived by now. - - if (flag_debug .and. mype==0) print *, achar(27)//'[38m'//' --> call tracer_gradient_elements'//achar(27)//'[0m' - call tracer_gradient_elements(tr_arr(:,:,tr_num), mesh) !redefine tr_arr to the current timestep - call exchange_elem(tr_xy) - -END SUBROUTINE init_tracers_AB -! -! 
-!======================================================================================== -SUBROUTINE relax_to_clim(tr_num, mesh) - - use g_config,only: dt - USE g_PARSUP - use o_arrays - IMPLICIT NONE - - type(t_mesh), intent(in) , target :: mesh - integer :: tr_num,n,nz, nzmin, nzmax - -#include "associate_mesh.h" - - if ((clim_relax>1.0e-8_WP).and.(tr_num==1)) then - DO n=1, myDim_nod2D - nzmin = ulevels_nod2D(n) - nzmax = nlevels_nod2D(n) - !!PS tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)=tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)+& - !!PS relax2clim(n)*dt*(Tclim(1:nlevels_nod2D(n)-1,n)-tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)) - tr_arr(nzmin:nzmax-1,n,tr_num)=tr_arr(nzmin:nzmax-1,n,tr_num)+& - relax2clim(n)*dt*(Tclim(nzmin:nzmax-1,n)-tr_arr(nzmin:nzmax-1,n,tr_num)) - END DO - END if - if ((clim_relax>1.0e-8_WP).and.(tr_num==2)) then - DO n=1, myDim_nod2D - nzmin = ulevels_nod2D(n) - nzmax = nlevels_nod2D(n) - !!PS tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)=tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)+& - !!PS relax2clim(n)*dt*(Sclim(1:nlevels_nod2D(n)-1,n)-tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)) - tr_arr(nzmin:nzmax-1,n,tr_num)=tr_arr(nzmin:nzmax-1,n,tr_num)+& - relax2clim(n)*dt*(Sclim(nzmin:nzmax-1,n)-tr_arr(nzmin:nzmax-1,n,tr_num)) - END DO - END IF -END SUBROUTINE relax_to_clim -END MODULE o_tracers -! -! 
-!======================================================================================== -SUBROUTINE tracer_gradient_z(ttf, mesh) +SUBROUTINE tracer_gradient_z(ttf, partit, mesh) !computes vertical gradient of tracer - USE o_PARAM USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE o_PARAM USE o_ARRAYS - USE g_PARSUP USE g_CONFIG IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - real(kind=WP) :: ttf(mesh%nl-1,myDim_nod2D+eDim_nod2D) + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + real(kind=WP) :: ttf(mesh%nl-1,partit%myDim_nod2D+partit%eDim_nod2D) real(kind=WP) :: dz integer :: n, nz, nzmin, nzmax -#include "associate_mesh.h" - +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz, nzmin, nzmax, dz) DO n=1, myDim_nod2D+eDim_nod2D !!PS nlev=nlevels_nod2D(n) nzmax=nlevels_nod2D(n) @@ -150,4 +129,54 @@ SUBROUTINE tracer_gradient_z(ttf, mesh) tr_z(nzmin, n)=0.0_WP tr_z(nzmax, n)=0.0_WP END DO +!$OMP END PARALLEL DO END SUBROUTINE tracer_gradient_z +! +! 
+!======================================================================================== +SUBROUTINE relax_to_clim(tr_num, tracers, partit, mesh) + use g_config,only: dt + use o_arrays + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + IMPLICIT NONE + + integer, intent(in) :: tr_num + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + integer :: n, nzmin, nzmax + real(kind=WP), dimension(:,:), pointer :: trarr + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + trarr=>tracers%data(tr_num)%values(:,:) + + if ((clim_relax>1.0e-8_WP).and.(tracers%data(tr_num)%ID==1)) then +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nzmin, nzmax) + DO n=1, myDim_nod2D + nzmin = ulevels_nod2D(n) + nzmax = nlevels_nod2D(n) + !!PS tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)=tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)+& + !!PS relax2clim(n)*dt*(Tclim(1:nlevels_nod2D(n)-1,n)-tr_arr(1:nlevels_nod2D(n)-1,n,tr_num)) + trarr(nzmin:nzmax-1,n)=trarr(nzmin:nzmax-1,n)+& + relax2clim(n)*dt*(Tclim(nzmin:nzmax-1,n)-trarr(nzmin:nzmax-1,n)) + END DO +!$OMP END PARALLEL DO + END if + if ((clim_relax>1.0e-8_WP).and.(tracers%data(tr_num)%ID==2)) then +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nzmin, nzmax) + DO n=1, myDim_nod2D + nzmin = ulevels_nod2D(n) + nzmax = nlevels_nod2D(n) + trarr(nzmin:nzmax-1,n)=trarr(nzmin:nzmax-1,n)+& + relax2clim(n)*dt*(Sclim(nzmin:nzmax-1,n)-trarr(nzmin:nzmax-1,n)) + END DO +!$OMP END PARALLEL DO + END IF +END SUBROUTINE relax_to_clim +END MODULE o_tracers diff --git a/src/oce_vel_rhs_vinv.F90 b/src/oce_vel_rhs_vinv.F90 deleted file mode 100755 index a09e6658e..000000000 --- a/src/oce_vel_rhs_vinv.F90 +++ /dev/null @@ -1,321 +0,0 @@ -module relative_vorticity_interface - interface - subroutine relative_vorticity(mesh) - use mod_mesh - type(t_mesh), intent(in) , target :: mesh - end 
subroutine - end interface -end module - -! Vector invariant momentum advection: -! (curl u+f)\times u+grad(u^2/2)+w du/dz -! -! =================================================================== -subroutine relative_vorticity(mesh) - USE o_ARRAYS - USE MOD_MESH - USE g_PARSUP - use g_comm_auto - IMPLICIT NONE - integer :: n, nz, el(2), enodes(2), nl1, nl2, edge, ul1, ul2, nl12, ul12 - real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2, c1 - - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" - - !!PS DO n=1,myDim_nod2D - !!PS nl1 = nlevels_nod2D(n)-1 - !!PS ul1 = ulevels_nod2D(n) - !!PS vorticity(ul1:nl1,n)=0.0_WP - !!PS !!PS DO nz=1, nlevels_nod2D(n)-1 - !!PS !!PS vorticity(nz,n)=0.0_WP - !!PS !!PS END DO - !!PS END DO - vorticity(:,1:myDim_nod2D) = 0.0_WP - DO edge=1,myDim_edge2D - !! edge=myList_edge2D(m) - enodes=edges(:,edge) - el=edge_tri(:,edge) - nl1=nlevels(el(1))-1 - ul1=ulevels(el(1)) - deltaX1=edge_cross_dxdy(1,edge) - deltaY1=edge_cross_dxdy(2,edge) - nl2=0 - ul2=0 - if(el(2)>0) then - deltaX2=edge_cross_dxdy(3,edge) - deltaY2=edge_cross_dxdy(4,edge) - nl2=nlevels(el(2))-1 - ul2=ulevels(el(2)) - end if - nl12 = min(nl1,nl2) - ul12 = max(ul1,ul2) - - DO nz=ul1,ul12-1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - if (ul2>0) then - DO nz=ul2,ul12-1 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - endif - !!PS DO nz=1,min(nl1,nl2) - DO nz=ul12,nl12 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1))- & - deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl1 - DO nz=nl12+1,nl1 - c1=deltaX1*UV(1,nz,el(1))+deltaY1*UV(2,nz,el(1)) - 
vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - !!PS DO nz=min(nl1,nl2)+1,nl2 - DO nz=nl12+1,nl2 - c1= -deltaX2*UV(1,nz,el(2))-deltaY2*UV(2,nz,el(2)) - vorticity(nz,enodes(1))=vorticity(nz,enodes(1))+c1 - vorticity(nz,enodes(2))=vorticity(nz,enodes(2))-c1 - END DO - END DO - - ! vorticity = vorticity*area at this stage - ! It is correct only on myDim nodes - DO n=1,myDim_nod2D - !! n=myList_nod2D(m) - ul1 = ulevels_nod2D(n) - nl1 = nlevels_nod2D(n) - !!PS DO nz=1,nlevels_nod2D(n)-1 - DO nz=ul1,nl1-1 - vorticity(nz,n)=vorticity(nz,n)/area(nz,n) - END DO - END DO - - call exchange_nod(vorticity) - -! Now it the relative vorticity known on neighbors too -end subroutine relative_vorticity -! ========================================================================== -subroutine compute_vel_rhs_vinv(mesh) !vector invariant - USE o_PARAM - USE o_ARRAYS - USE MOD_MESH - USE g_PARSUP - USE g_CONFIG - use g_comm_auto - use relative_vorticity_interface - IMPLICIT NONE - type(t_mesh), intent(in) , target :: mesh - integer :: n, n1, nz, elem, elnodes(3), nl1, j, nzmin,nzmax - real(kind=WP) :: a, b, c, da, db, dc, dg, ff(3), gg, eta(3), pre(3), Fx, Fy,w - real(kind=WP) :: uvert(mesh%nl,2), umean, vmean, friction - logical, save :: lfirst=.true. - real(kind=WP) :: KE_node(mesh%nl-1,myDim_nod2D+eDim_nod2D) - real(kind=WP) :: dZ_inv(2:mesh%nl-1), dzbar_inv(mesh%nl-1), elem_area_inv - real(kind=WP) :: density0_inv = 1./density_0 - -#include "associate_mesh.h" - - uvert=0.0_WP - - ! ====================== - ! Kinetic energy at nodes: - ! ====================== - - - KE_node(:,:)=0.0_WP - - DO elem=1, myDim_elem2D - !! 
elem=myList_elem2D(m) - elnodes=elem2D_nodes(:,elem) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - DO j=1,3 !NR interchange loops => nz-loop vectorizes - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - KE_node(nz,elnodes(j)) = KE_node(nz,elnodes(j))+(UV(1,nz,elem)*UV(1,nz,elem) & - +UV(2,nz,elem)*UV(2,nz,elem))*elem_area(elem) !NR/6.0_WP below - END DO - END DO - END DO - - DO n=1,myDim_nod2D - !! n=myList_nod2D(m) - nzmin = ulevels_nod2D(n) - nzmax = nlevels_nod2D(n) - !!PS DO nz=1, nlevels_nod2D(n)-1 - DO nz=nzmin, nzmax-1 - !DO nz=1, nl-1 - KE_node(nz,n)=KE_node(nz,n)/(6._WP*area(nz,n)) !NR divide by 6 here - END DO - END DO - - ! Set the kinetic energy to zero at lateral walls: - DO n=1,myDim_edge2D - !! n=myList_edge2D(m) - if(myList_edge2D(n) > edge2D_in) then - elnodes(1:2)=edges(:,n) - KE_node(:,elnodes(1:2))=0.0_WP - endif - end DO - - call exchange_nod(KE_node) - ! Now gradients of KE will be correct on myDim_elem2D - - ! ================== - ! AB contribution from the old time step - ! ================== - Do elem=1, myDim_elem2D !! P (a) - !! elem=myList_elem2D(m) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nl-1 - DO nz=nzmin,nzmax-1 - UV_rhs(1,nz,elem)=-(0.5_WP+epsilon)*UV_rhsAB(1,nz,elem) - UV_rhs(2,nz,elem)=-(0.5_WP+epsilon)*UV_rhsAB(2,nz,elem) - END DO - END DO - - call relative_vorticity(mesh) - ! ==================== - ! Sea level and pressure contribution -\nabla(g\eta +hpressure/rho_0+V^2/2) - ! and the Coriolis force (elemental part) - ! ==================== - - !DS KE_node=0. !DS - !DS vorticity=0. !DS - DO elem=1, myDim_elem2D !! P (b) elem=1,elem2D - !! 
elem=myList_elem2D(m) - elnodes = elem2D_nodes(:,elem) - eta = g*eta_n(elnodes) - gg = elem_area(elem) - ff = coriolis_node(elnodes) - - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - pre = -(eta + hpressure(nz,elnodes)*density0_inv) - Fx = sum(gradient_sca(1:3,elem)*pre) - Fy = sum(gradient_sca(4:6,elem)*pre) - UV_rhs(1,nz,elem) = UV_rhs(1,nz,elem)+Fx*gg - UV_rhs(2,nz,elem) = UV_rhs(2,nz,elem)+Fy*gg - - pre = -KE_node(nz,elnodes) - Fx = sum(gradient_sca(1:3,elem)*pre) - Fy = sum(gradient_sca(4:6,elem)*pre) - - da = UV(2,nz,elem)*sum(ff+vorticity(nz,elnodes))/3.0_WP - db =-UV(1,nz,elem)*sum(ff+vorticity(nz,elnodes))/3.0_WP - - UV_rhsAB(1,nz,elem)=(da+Fx)*gg - UV_rhsAB(2,nz,elem)=(db+Fy)*gg - - END DO - END DO - ! ======================= - ! Compute w du/dz at elements: wdu/dz=d(wu)/dz-udw/dz - ! The central estimate of u in the flux term will correspond to energy - ! conservation - ! ======================= - - !NR precompute - DO nz=2,nl-1 - dZ_inv(nz) = 1.0_WP/(Z(nz-1)-Z(nz)) - ENDDO - DO nz=1,nl-1 - dzbar_inv(nz) = 1.0_WP/(zbar(nz)-zbar(nz+1)) - END DO - -!DO elem=1, myDim_elem2D -! !! elem=myList_elem2D(m) -! elnodes=elem2D_nodes(:,elem) -! nl1=nlevels(elem)-1 -! -! uvert(1,1:2)=0d0 -! uvert(nl1+1,1:2)=0d0 -! -! DO nz=2, nl1 -! w=sum(Wvel(nz,elnodes))/3.0_WP -! umean=0.5_WP*(UV(1,nz-1,elem)+UV(1,nz,elem)) -! vmean=0.5_WP*(UV(2,nz-1,elem)+UV(2,nz,elem)) -! uvert(nz,1)=-umean*w -! uvert(nz,2)=-vmean*w -! END DO -! DO nz=1,nl1 -! da=sum(Wvel(nz,elnodes)-Wvel(nz+1,elnodes))/3.0_WP -! UV_rhsAB(1,nz,elem) = UV_rhsAB(1,nz,elem) + (uvert(nz,1)-uvert(nz+1,1)+& -! da*UV(1,nz,elem))*elem_area(elem)*dzbar_inv(nz) !/(zbar(nz)-zbar(nz+1)) -! UV_rhsAB(2,nz,elem)=UV_rhsAB(2,nz,elem)+(uvert(nz,2)-uvert(nz+1,2)+& -! da*UV(2,nz,elem))*elem_area(elem)*dzbar_inv(nz) !/(zbar(nz)-zbar(nz+1)) -! -! END DO -!END DO - - - DO elem=1, myDim_elem2D - !! 
elem=myList_elem2D(m) - elnodes=elem2D_nodes(:,elem) - !!PS nl1=nlevels(elem)-1 - nzmax=nlevels(elem)-1 - nzmin=ulevels(elem) - - ! w=sum(Wvel(2, elnodes))/3.0_WP - ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) - uvert(1,1)=w*(UV(1,1,elem)-UV(1,2,elem))*dZ_inv(2)*0.5_WP - uvert(1,2)=w*(UV(2,1,elem)-UV(2,2,elem))*dZ_inv(2)*0.5_WP - - ! w=sum(Wvel(nl1, elnodes))/3.0_WP - ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) - !!PS uvert(nl1,1)=w*(UV(1,nl1-1,elem)-UV(1,nl1,elem))*dZ_inv(nl1)*0.5_WP - !!PS uvert(nl1,2)=w*(UV(2,nl1-1,elem)-UV(2,nl1,elem))*dZ_inv(nl1)*0.5_WP - uvert(nzmax,1)=w*(UV(1,nzmax-1,elem)-UV(1,nzmax,elem))*dZ_inv(nzmax)*0.5_WP - uvert(nzmax,2)=w*(UV(2,nzmax-1,elem)-UV(2,nzmax,elem))*dZ_inv(nzmax)*0.5_WP - - - !!PS DO nz=2, nl1-1 - DO nz=nzmin+1, nzmax-1 - ! w=sum(Wvel(nz,elnodes)+Wvel(nz+1,elnodes))/6.0_WP - ! w=min(abs(w), 0.0001)*sign(1.0_WP, w) - if (w >= 0.0_WP) then - uvert(nz,1)=w*(UV(1,nz,elem)-UV(1,nz+1,elem))*dZ_inv(nz+1) - uvert(nz,2)=w*(UV(2,nz,elem)-UV(2,nz+1,elem))*dZ_inv(nz+1) - else - uvert(nz,1)=w*(UV(1,nz-1,elem)-UV(1,nz,elem))*dZ_inv(nz) - uvert(nz,2)=w*(UV(2,nz-1,elem)-UV(2,nz,elem))*dZ_inv(nz) - end if - END DO - !!PS UV_rhsAB(1,1:nl1,elem) = UV_rhsAB(1,1:nl1,elem) - uvert(1:nl1,1)*elem_area(elem) - !!PS UV_rhsAB(2,1:nl1,elem) = UV_rhsAB(2,1:nl1,elem) - uvert(1:nl1,2)*elem_area(elem) - UV_rhsAB(1,nzmin:nzmax,elem) = UV_rhsAB(1,nzmin:nzmax,elem) - uvert(nzmin:nzmax,1)*elem_area(elem) - UV_rhsAB(2,nzmin:nzmax,elem) = UV_rhsAB(2,nzmin:nzmax,elem) - uvert(nzmin:nzmax,2)*elem_area(elem) - - END DO - - ! ======================= - ! Update the rhs - ! ======================= - gg=(1.5_WP+epsilon) - if(lfirst.and.(.not.r_restart)) then - gg=1.0_WP - lfirst=.false. - end if - - DO elem=1, myDim_elem2D !! P(e) elem=1, elem2D - !! 
elem=myList_elem2D(m) - elem_area_inv = dt/elem_area(elem) - nzmin = ulevels(elem) - nzmax = nlevels(elem) - !!PS DO nz=1,nlevels(elem)-1 - DO nz=nzmin,nzmax-1 - UV_rhs(1,nz,elem)= (UV_rhs(1,nz,elem)+UV_rhsAB(1,nz,elem)*gg) *elem_area_inv - UV_rhs(2,nz,elem)= (UV_rhs(2,nz,elem)+UV_rhsAB(2,nz,elem)*gg) *elem_area_inv - END DO - END DO - ! U_rhs contains all contributions to velocity from old time steps -end subroutine compute_vel_rhs_vinv diff --git a/src/psolve_feom.c b/src/psolve_feom.c deleted file mode 100644 index 1beff1cf4..000000000 --- a/src/psolve_feom.c +++ /dev/null @@ -1,264 +0,0 @@ -#ifdef PARMS - -#include -#include -#include -#include -#include -#include "psolve.h" - -#define NSOL 1 -//#define NSOL 10 - -psolver solvers[NSOL]; -int solv_id[12] = {0}; -int nsolver = 0; - -void psolver_init_(int *id, SOLVERTYPE *stype, PCTYPE *pctype, PCILUTYPE *pcilutype, - int *ilulevel, int *fillin, double *droptol, int *maxits, int *restart, double *soltol, - int *part, int *rptr, int *cols, double *vals, int *reuse) -{ - - parms_Viewer v; - int i, j, k, nloc, pid, nproc; - int *ncnts, *idxn, *rp=NULL, *r=NULL, *c=NULL, nmb; - double tmp, *scale, *values=NULL; - - parms_Map map; - parms_Mat A; - parms_PC pc; - parms_Solver ksp; - - psolver solver; - - MPI_Comm_rank(MPI_COMM_WORLD,&pid); - MPI_Comm_size(MPI_COMM_WORLD,&nproc); - - solver = malloc(sizeof(*solver)); - nmb = part[nproc]-part[0]; - - nloc = 0; - if(nproc > 1) { - nloc = part[pid+1]-part[pid]; - parms_MapCreateFromPetsc(&map, nloc, nmb, MPI_COMM_WORLD); - idxn = malloc(nloc*sizeof(int)); - parms_MapGetGlobalIndices(map, idxn); - } - else { - parms_MapCreateFromLocal(&map,nmb,0); - nloc = nmb; - idxn = malloc(nloc*sizeof(int)); - for(i = 0; i < nloc; i++) - idxn[i] = i; - } - solver->reuse = *reuse; - - scale = malloc(nloc*sizeof(double)); - values = malloc(rptr[nloc]*sizeof(double)); - for(i = 0; i < nloc; i++){ - tmp = 0.; - for(j = rptr[i]; j < rptr[i+1]; j++) - tmp += fabs(vals[j]); - scale[i] = 
1./tmp; - for(j = rptr[i]; j < rptr[i+1]; j++) - values[j] = vals[j]*scale[i]; - } - solver->scale = scale; - - // create Mat - parms_MatCreate(&A,map); - parms_MatSetValues(A, nloc, idxn, rptr, cols, values, INSERT); - parms_MatSetup(A); - - // create PC/Solver & set parameters - parms_PCCreate(&pc,A); - set_pc_params(pc, *pctype, *pcilutype, *ilulevel, *fillin, *droptol); - parms_PCSetup(pc); - parms_SolverCreate(&ksp,A,pc); - set_solver_params(ksp, *stype, *maxits, *restart, *soltol); - - solver->ksp = ksp; - solver->map = map; - - if(solver->reuse){ - r = malloc((nloc)*sizeof(int)); - for(i = 0; i < nloc; i++) - r[i] = idxn[i]; - solver->rows = r; - - rp = malloc((nloc+1)*sizeof(int)); - for(i = 0; i < nloc+1; i++) - rp[i] = rptr[i]; - solver->rptr = rp; - - c = malloc(rptr[nloc]*sizeof(int)); - for(i = 0; i < rptr[nloc]; i++) - c[i] = cols[i]; - solver->cols = c; - solver->vals = values; - } - else{ - if(!r) - free(r); - if(!rp) - free(rp); - if(!c) - free(c); - if(!values) - free(values); - } - - - solv_id[*id] = nsolver; - solvers[nsolver++] = solver; - -} - -void psolver_final_() -{ - - int pid, i; - parms_Solver ksp; - parms_PC pc; - parms_Mat A; - parms_Map map; - psolver solver; - - for(i = 0; i < nsolver; i++){ - solver = solvers[i]; - - ksp = solver->ksp; - parms_SolverGetPC(ksp, &pc); - parms_SolverGetMat(ksp,&A); - map = solver->map; - - parms_SolverFree(&ksp); - parms_PCFree(&pc); - parms_MatFree(&A); - parms_MapFree(&map); - - if(solver->reuse){ - free(solver->rptr); - free(solver->rows); - free(solver->cols); - free(solver->vals); - } - free(solver->scale); - - free(solver); - } -} - -void psolve_(int *id, double *rhs, double *vals, double *sol, int *new) -{ - - parms_Viewer v; - psolver solver; - parms_Map map; - parms_Mat A; - parms_PC pc; - parms_Solver ksp; - - double resnorm; - double *x,*y, *scale, *values, tmp; - double t0,t1,t2,toh; - int *rptr, *cols; - int its,err,i,j,k, cnt, nloc, pid, sid; - - sid = solv_id[*id]; - - solver = 
solvers[sid]; - ksp = solver->ksp; - map = solver->map; - nloc = parms_MapGetLocalSize(map); - - scale = solver->scale; - - if(*new) { - if(solver->reuse){ - parms_SolverGetPC(ksp, &pc); - parms_SolverGetMat(ksp, &A); - - rptr = solver->rptr; - cols = solver->cols; - - values = solver->vals; - for(i = 0; i < nloc; i++){ - tmp = 0.; - for(j = rptr[i]; j < rptr[i+1]; j++) - tmp += fabs(vals[j]); - scale[i] = 1./tmp; - for(j = rptr[i]; j < rptr[i+1]; j++) - values[j] = vals[j]*scale[i]; - } - - // create Mat & set values - parms_MatReset(A,SAME_NONZERO_STRUCTURE); - parms_MatSetValues(A, nloc, solver->rows, rptr, cols, values, INSERT); - parms_MatSetup(A); - - parms_PCSetup(pc); - } - else - printf("ERROR: matrix data is static\n"); - } - - x = sol; - y = malloc(nloc*sizeof(double)); - for(i = 0; i < nloc; i++) - y[i] = rhs[i]*scale[i]; - - // solve system of equations - parms_SolverApply(ksp,y,x); - -/* - // get trueresidual and number of iterations - parms_SolverGetResidualNorm2(ksp,y,x,&resnorm); - its = parms_SolverGetIts(ksp); - printf("%e %d\n", resnorm, its); -*/ - - free(y); -} - -int set_pc_params(parms_PC pc, PCTYPE pctype, PCILUTYPE pcilutype, - int ilulevel, int fillin, double droptol){ - - int i, lfil[7]; - double dtol[7]; - - for(i = 0; i < 7; i++){ - lfil[i] = fillin; - dtol[i] = droptol; - } - - parms_PCSetType(pc, pctype); - parms_PCSetILUType(pc, pcilutype); - parms_PCSetNlevels(pc, ilulevel); - parms_PCSetFill(pc, lfil); - parms_PCSetTol(pc, dtol); - - return 0; -} - - -int set_solver_params(parms_Solver solver, SOLVERTYPE solvertype, - int maxits, int restart, double soltol){ - char buf[100]; - - parms_SolverSetType(solver, solvertype); - - sprintf(buf, "%d", maxits); - parms_SolverSetParam(solver, MAXITS, buf); - - sprintf(buf, "%d", restart); - parms_SolverSetParam(solver, KSIZE, buf); - - sprintf(buf, "%g", soltol); - parms_SolverSetParam(solver, DTOL, buf); - - return 0; -} - - -#endif diff --git a/src/solver.F90 b/src/solver.F90 new file mode 
module ssh_solve_preconditioner_interface
  interface
     subroutine ssh_solve_preconditioner(solverinfo, partit, mesh)
       use MOD_MESH
       USE MOD_PARTIT
       USE MOD_PARSUP
       USE MOD_DYN
       type(t_solverinfo), intent(inout), target :: solverinfo
       type(t_partit),     intent(inout), target :: partit
       type(t_mesh),       intent(inout), target :: mesh
     end subroutine
  end interface
end module

module ssh_solve_cg_interface
  interface
     subroutine ssh_solve_cg(x, rhs, solverinfo, partit, mesh)
       use MOD_MESH
       USE MOD_PARTIT
       USE MOD_PARSUP
       USE MOD_DYN
       type(t_solverinfo), intent(inout), target :: solverinfo
       type(t_partit),     intent(inout), target :: partit
       type(t_mesh),       intent(inout), target :: mesh
       real(kind=WP), intent(inout) :: x(partit%myDim_nod2D+partit%eDim_nod2D)
       real(kind=WP), intent(in)    :: rhs(partit%myDim_nod2D+partit%eDim_nod2D)
     end subroutine
  end interface
end module
!=========================================================================
subroutine ssh_solve_preconditioner(solverinfo, partit, mesh)
  ! Build the (inverse) preconditioner for the ssh system and allocate the
  ! CG work arrays. Call once before the first call to ssh_solve_cg.
  !
  ! Preconditioner follows MITgcm (JGR, 102, 5753-5766, 1997).
  ! If row r of the ssh equation is a_r*eta_r + \sum a_i*eta_i = rhs_r,
  ! where the summation is over all nodes neighboring node r,
  ! the inverse of the preconditioner matrix has the coefficients
  !   1/a_r, ... , -2*a_i/a_r/(a_r+(a_diag)_i), ...
  ! Here (a_diag)_i is the diagonal value in row i of the ssh matrix.
  !
  ! The inverse of the preconditioner matrix (M^{-1} in general notation, K in
  ! the paper cited) is, in reality, one iteration of the Jacobi method, with
  ! symmetrization. Symmetrization is needed so that the conjugate gradient
  ! method can be used.
  !
  ! Side effects: allocates mesh%ssh_stiff%pr_values and the work vectors
  ! solverinfo%rr, %zz, %pp, %App.
  use MOD_MESH
  USE MOD_PARTIT
  USE MOD_PARSUP
  USE MOD_DYN
  USE g_comm_auto
  IMPLICIT NONE
  type(t_solverinfo), intent(inout), target :: solverinfo
  type(t_partit),     intent(inout), target :: partit
  type(t_mesh),       intent(inout), target :: mesh
  integer                        :: nend, row, node, n, offset
  real(kind=WP), allocatable     :: diag_values(:)
  real(kind=WP), pointer         :: pr_values(:)
  integer,       pointer         :: rptr(:), cind(:)

#include "associate_part_def.h"
#include "associate_mesh_def.h"
#include "associate_part_ass.h"
#include "associate_mesh_ass.h"

  nend=ssh_stiff%rowptr(myDim_nod2D+1)-ssh_stiff%rowptr(1)
  allocate(mesh%ssh_stiff%pr_values(nend)) ! values of the inverse preconditioner matrix
  pr_values=>mesh%ssh_stiff%pr_values
  cind     =>mesh%ssh_stiff%colind_loc
  rptr     =>mesh%ssh_stiff%rowptr_loc
  allocate(diag_values(myDim_nod2D+eDim_nod2D)) ! temporary, freed below

  ! Collect the matrix diagonal: the first entry of each local row is the
  ! diagonal one; halo exchange makes it available on eDim nodes too.
  DO row=1, myDim_nod2D
     offset=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1)+1
     diag_values(row)=ssh_stiff%values(offset)
  END DO
  call exchange_nod(diag_values, partit)
  ! ==========
  ! Fill in the preconditioner
  ! ==========
  DO row=1, myDim_nod2D
     offset=ssh_stiff%rowptr(row)-ssh_stiff%rowptr(1)
     nend  =ssh_stiff%rowptr(row+1)-ssh_stiff%rowptr(row)
     pr_values(offset+1)=1.0_WP/ssh_stiff%values(offset+1)
     DO n=2, nend
        node=cind(offset+n)      ! local column index of the off-diagonal entry
        pr_values(n+offset)=-0.5_WP*(ssh_stiff%values(n+offset)/ssh_stiff%values(1+offset))/ &
                            (ssh_stiff%values(1+offset)+diag_values(node))
     END DO
  END DO
  deallocate(diag_values)

  ! Work vectors for CG: residual rr, preconditioned residual zz,
  ! search direction pp, and matrix-vector product App.
  n=myDim_nod2D+eDim_nod2D
  allocate(solverinfo%rr(n), solverinfo%zz(n), solverinfo%pp(n), solverinfo%App(n))
  solverinfo%rr =0.0_WP
  solverinfo%zz =0.0_WP
  solverinfo%pp =0.0_WP
  solverinfo%App=0.0_WP
end subroutine ssh_solve_preconditioner

! ========================================================
subroutine ssh_solve_cg(x, rhs, solverinfo, partit, mesh)
  ! Preconditioned conjugate gradient solver for the ssh system A*x = rhs.
  ! Our ssh matrix is symmetric because we compute the divergence
  ! contributions as integrated over the area of the scalar control volume.
  !
  ! I tried first to follow the MITgcm paper, but I have doubts about
  ! their computation of beta. The variant below -- see Wikipedia
  ! (standard preconditioned CG).
  !
  ! x   : on entry the initial guess, on exit the solution (halo updated)
  ! rhs : right-hand side (only owned nodes are read)
  ! Requires ssh_solve_preconditioner to have been called (pr_values, rr,
  ! zz, pp, App allocated).
  USE MOD_MESH
  USE MOD_PARTIT
  USE MOD_PARSUP
  USE MOD_DYN
  USE g_comm_auto
  IMPLICIT NONE
  type(t_solverinfo), intent(inout), target :: solverinfo
  type(t_partit),     intent(inout), target :: partit
  type(t_mesh),       intent(inout), target :: mesh
  real(kind=WP), intent(inout) :: x(partit%myDim_nod2D+partit%eDim_nod2D)
  real(kind=WP), intent(in)    :: rhs(partit%myDim_nod2D+partit%eDim_nod2D)
  integer       :: row, iter
  real(kind=WP) :: sprod(2), s_old, s_aux, al, be, rtol
  real(kind=WP), pointer :: pr_values(:), rr(:), zz(:), pp(:), App(:)
  integer,       pointer :: rptr(:), cind(:)

#include "associate_part_def.h"
#include "associate_mesh_def.h"
#include "associate_part_ass.h"
#include "associate_mesh_ass.h"

  pr_values=>mesh%ssh_stiff%pr_values
  cind     =>mesh%ssh_stiff%colind_loc
  rptr     =>mesh%ssh_stiff%rowptr_loc

  rr =>solverinfo%rr
  zz =>solverinfo%zz
  pp =>solverinfo%pp
  App=>solverinfo%App

  ! ==============
  ! Initialization. We solve AX=b, r_0=b-AX_0
  ! ==============
  ! Define working tolerance from the rms of the rhs:
  ! ==============
#if !defined(__openmp_reproducible)
  s_old=0.0_WP
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_old)
  DO row=1, myDim_nod2D
     s_old=s_old+rhs(row)*rhs(row)
  END DO
!$OMP END PARALLEL DO
#else
  s_old = sum(rhs(1:myDim_nod2D) * rhs(1:myDim_nod2D))
#endif

  ! NOTE(review): MPI_DOUBLE is the C datatype handle; the portable Fortran
  ! handle is MPI_DOUBLE_PRECISION -- confirm this matches kind WP.
  call MPI_Allreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr)
  rtol=solverinfo%soltol*sqrt(s_old/real(nod2D,WP))
  ! ==============
  ! Compute r0 = rhs - A*x0
  ! ==============
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row)
  DO row=1, myDim_nod2D
     rr(row)=rhs(row)-sum(ssh_stiff%values(rptr(row):rptr(row+1)-1)* &
                          X(cind(rptr(row):rptr(row+1)-1)))
  END DO
!$OMP END PARALLEL DO
  call exchange_nod(rr, partit)
!$OMP BARRIER
  ! =============
  ! z_0=M^{-1} r_0 (M^{-1} is the precondit. matrix)
  ! pp is the search direction
  ! =============
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row)
  DO row=1, myDim_nod2D
     zz(row)= sum(pr_values(rptr(row):rptr(row+1)-1)*rr(cind(rptr(row):rptr(row+1)-1)))
     pp(row)=zz(row)
  END DO
!$OMP END PARALLEL DO
  ! ===============
  ! Scalar product of r*z
  ! ===============
#if !defined(__openmp_reproducible)
  s_old=0.0_WP
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_old)
  DO row=1, myDim_nod2D
     s_old=s_old+rr(row)*zz(row)
  END DO
!$OMP END PARALLEL DO
#else
  s_old = sum(rr(1:myDim_nod2D) * zz(1:myDim_nod2D))
#endif

  call MPI_Allreduce(MPI_IN_PLACE, s_old, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr)

  ! ===============
  ! Iterations
  ! ===============
  Do iter=1, solverinfo%maxiter
     ! ============
     ! Compute Ap
     ! ============
     call exchange_nod(pp, partit) ! Update before matrix-vector multiplications
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row)
     DO row=1, myDim_nod2D
        App(row)=sum(ssh_stiff%values(rptr(row):rptr(row+1)-1)*pp(cind(rptr(row):rptr(row+1)-1)))
     END DO
!$OMP END PARALLEL DO
     ! ============
     ! Scalar products for alpha
     ! ============
#if !defined(__openmp_reproducible)
     s_aux=0.0_WP
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:s_aux)
     DO row=1, myDim_nod2D
        s_aux=s_aux+pp(row)*App(row)
     END DO
!$OMP END PARALLEL DO
#else
     s_aux = sum(pp(1:myDim_nod2D) * App(1:myDim_nod2D))
#endif

     call MPI_Allreduce(MPI_IN_PLACE, s_aux, 1, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr)
     al=s_old/s_aux
     ! ===========
     ! New X and residual r
     ! ===========
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row)
     DO row=1, myDim_nod2D
        X(row) =X(row) +al* pp(row)
        rr(row)=rr(row)-al*App(row)
     END DO
!$OMP END PARALLEL DO
     ! ===========
     ! New z
     ! ===========
     call exchange_nod(rr, partit) ! Update before matrix-vector multiplications
!$OMP BARRIER
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row)
     DO row=1, myDim_nod2D
        zz(row)= sum(pr_values(rptr(row):rptr(row+1)-1)*rr(cind(rptr(row):rptr(row+1)-1)))
     END DO
!$OMP END PARALLEL DO
     ! ===========
     ! Scalar products for beta:
     ! sprod(1) = (r,z) for the beta numerator, sprod(2) = (r,r) for the exit test
     ! ===========
#if !defined(__openmp_reproducible)
     sprod(1:2)=0.0_WP
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row) REDUCTION(+:sprod)
     DO row=1, myDim_nod2D
        sprod(1)=sprod(1)+rr(row)*zz(row)
        sprod(2)=sprod(2)+rr(row)*rr(row)
     END DO
!$OMP END PARALLEL DO
#else
     sprod(1) = sum(rr(1:myDim_nod2D) * zz(1:myDim_nod2D))
     ! BUG FIX: this sum must go into sprod(2); the original assigned sprod(1)
     ! twice, leaving the residual norm sprod(2) undefined and clobbering the
     ! beta numerator (mirrors the OpenMP-reduction branch above).
     sprod(2) = sum(rr(1:myDim_nod2D) * rr(1:myDim_nod2D))
#endif

     call MPI_Allreduce(MPI_IN_PLACE, sprod, 2, MPI_DOUBLE, MPI_SUM, partit%MPI_COMM_FESOM, MPIerr)

!$OMP BARRIER
     ! ===========
     ! Exit if tolerance is achieved
     ! ===========
     if (sqrt(sprod(2)/nod2D)< rtol) then
        exit
     endif
     be=sprod(1)/s_old
     s_old=sprod(1)
     ! ===========
     ! New p
     ! ===========
!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(row)
     DO row=1,myDim_nod2D
        pp(row)=zz(row)+be*pp(row)
     END DO
!$OMP END PARALLEL DO
  END DO
  ! At the end: The result is in X, but it needs a halo exchange.
  call exchange_nod(x, partit)
!$OMP BARRIER
end subroutine ssh_solve_cg

!
! ===================================================================
!==========================================================
! MOD_MESH: derived types describing the FESOM2 mesh (T_MESH) and the ssh
! stiffness matrix in CSR form (SPARSE_MATRIX), plus user-defined
! unformatted I/O (write_t_mesh / read_t_mesh) for dumping/restoring them.
! The record order in the writer and reader MUST stay in sync.
MODULE MOD_MESH
USE O_PARAM
USE MOD_WRITE_BINARY_ARRAYS
USE MOD_READ_BINARY_ARRAYS
USE, intrinsic :: ISO_FORTRAN_ENV
IMPLICIT NONE
SAVE
integer, parameter :: MAX_ADJACENT=32 ! Max allowed number of adjacent nodes

! Sparse matrix in compressed sparse row (CSR) storage; *_loc variants hold
! the process-local index arrays.
TYPE SPARSE_MATRIX
    integer :: nza                                            ! number of nonzero entries
    integer :: dim                                            ! matrix dimension
    real(kind=WP),  allocatable, dimension(:) :: values       ! nonzero values
    integer(int32), allocatable, dimension(:) :: colind       ! global column indices
    integer(int32), allocatable, dimension(:) :: rowptr       ! global row pointers
    integer(int32), allocatable, dimension(:) :: colind_loc   ! local column indices
    integer(int32), allocatable, dimension(:) :: rowptr_loc   ! local row pointers
    ! pr_values (inverse preconditioner values) is allocated at runtime by the
    ! ssh solver -- TODO confirm where it is declared; not visible in this chunk.
END TYPE SPARSE_MATRIX

TYPE T_MESH
integer                                            :: nod2D      ! the number of 2D nodes
real(kind=WP)                                      :: ocean_area, ocean_areawithcav
real(kind=WP), allocatable, dimension(:,:)         :: coord_nod2D, geo_coord_nod2D
integer                                            :: edge2D     ! the number of 2D edges
integer                                            :: edge2D_in  ! the number of internal 2D edges
integer                                            :: elem2D     ! the number of 2D elements
integer, allocatable, dimension(:,:)               :: elem2D_nodes ! elem2D_nodes(:,n) lists 3 nodes of element n
integer, allocatable, dimension(:,:)               :: edges        ! edges(:,n) lists 2 nodes of edge n
integer, allocatable, dimension(:,:)               :: edge_tri     ! edge_tri(:,n) lists 2
                                                   ! elements containing edge n: the first one is to the left
                                                   ! of the line directed to the second node
integer, allocatable, dimension(:,:)               :: elem_edges   ! elem_edges(:,n) are edges of element n
real(kind=WP), allocatable, dimension(:)           :: elem_area
real(kind=WP), allocatable, dimension(:,:)         :: edge_dxdy, edge_cross_dxdy
real(kind=WP), allocatable, dimension(:)           :: elem_cos, metric_factor
integer, allocatable, dimension(:,:)               :: elem_neighbors
integer, allocatable, dimension(:,:)               :: nod_in_elem2D
real(kind=WP), allocatable, dimension(:,:)         :: x_corners, y_corners ! corners for the scalar points
integer, allocatable, dimension(:)                 :: nod_in_elem2D_num
real(kind=WP), allocatable, dimension(:)           :: depth        ! depth(n) is the depth at node n
real(kind=WP), allocatable, dimension(:,:)         :: gradient_vec ! coefficients of linear reconstruction
                                                   ! of velocities on elements
real(kind=WP), allocatable, dimension(:,:)         :: gradient_sca ! coefficients to compute gradient of scalars
                                                   ! on elements
! NOTE(review): the entity array-spec duplicates the DIMENSION(:) attribute;
! legal but redundant.
INTEGER, ALLOCATABLE, DIMENSION(:)                 :: bc_index_nod2D(:)
! vertical structure
!
!
!___vertical mesh info__________________________________________________________
! total number of layers
integer                                            :: nl

! initial layer, mid-depth layer and element depth
real(kind=WP), allocatable, dimension(:)           :: zbar, Z,elem_depth

! upper boundary index of all vertical vertice/element loops, default==1 but when
! cavity is used becomes index of cavity-ocean boundary at vertices and elements
integer, allocatable, dimension(:)                 :: ulevels, ulevels_nod2D, ulevels_nod2D_max

! number of levels at elem and vertices considering bottom topography
integer, allocatable, dimension(:)                 :: nlevels, nlevels_nod2D, nlevels_nod2D_min

!
!
!___horizontal mesh info________________________________________________________
real(kind=WP), allocatable, dimension(:,:)         :: area, area_inv, areasvol, areasvol_inv
real(kind=WP), allocatable, dimension(:)           :: mesh_resolution

!
!
!___cavity mesh info____________________________________________________________
! level index of cavity-ocean boundary at vertices and elements
! --> see: ulevels, ulevels_nod2D (fvom_main)

! vertice/element yes=1/no=0 flag if cavity exists
integer, allocatable, dimension(:)                 :: cavity_flag_n, cavity_flag_e

! depth of cavity-ocean interface
real(kind=WP), allocatable, dimension(:)           :: cavity_depth


real(kind=WP), allocatable, dimension(:,:)         :: cavity_nrst_cavlpnt_xyz

!
!
!___Elevation stiffness matrix__________________________________________________
type(sparse_matrix)                                :: ssh_stiff

!#if defined (__oasis)
real(kind=WP), allocatable, dimension(:)           :: lump2d_south, lump2d_north
integer, allocatable, dimension(:)                 :: ind_south, ind_north
!#endif

integer                                            :: nn_size
integer, allocatable, dimension(:)                 :: nn_num
integer, allocatable, dimension(:,:)               :: nn_pos

!_______________________________________________________________________________
! Arrays added for ALE implementation:
! --> layer thickness at node and depth layer for t=n and t=n+1
real(kind=WP), allocatable,dimension(:,:)          :: hnode, hnode_new, zbar_3d_n, Z_3d_n

! --> layer thickness at elements, interpolated from hnode
real(kind=WP), allocatable,dimension(:,:)          :: helem

! --> thickness of bottom elem (important for partial cells)
real(kind=WP), allocatable,dimension(:)            :: bottom_elem_thickness
real(kind=WP), allocatable,dimension(:)            :: bottom_node_thickness

! --> The increment of total fluid depth on elements. It is used to update the matrix
real(kind=WP), allocatable,dimension(:)            :: dhe

! --> hbar, hbar_old: correspond to the elevation, but on semi-integer time steps.
real(kind=WP), allocatable,dimension(:)            :: hbar, hbar_old

! --> auxiliary array to store depth of layers and depth of mid level due to changing
!     layer thickness at every node
real(kind=WP), allocatable,dimension(:)            :: zbar_n, Z_n

! new bottom depth at node and element due to partial cells
real(kind=WP), allocatable,dimension(:)            :: zbar_n_bot
real(kind=WP), allocatable,dimension(:)            :: zbar_e_bot

! new depth of cavity-ocean interface at node and element due to partial cells
real(kind=WP), allocatable,dimension(:)            :: zbar_n_srf
real(kind=WP), allocatable,dimension(:)            :: zbar_e_srf

character(:), allocatable :: representative_checksum

contains
    procedure write_t_mesh
    procedure read_t_mesh
    generic :: write(unformatted) => write_t_mesh
    generic :: read(unformatted) => read_t_mesh
END TYPE T_MESH

contains

! Unformatted writing for t_mesh.
! Serializes scalars directly and arrays via write_bin_array (which records the
! sizes needed for allocation on read). The record order here defines the file
! format and must match read_t_mesh exactly.
! NOTE(review): coord_nod2D / geo_coord_nod2D and representative_checksum are
! NOT serialized -- confirm this is intentional.
subroutine write_t_mesh(mesh, unit, iostat, iomsg)
    IMPLICIT NONE
    class(t_mesh),  intent(in)    :: mesh
    integer,        intent(in)    :: unit    ! unformatted unit, already open
    integer,        intent(out)   :: iostat  ! status of the last write
    character(*),   intent(inout) :: iomsg   ! error message of the last write
    integer                       :: i, j, k     ! (unused here)
    integer                       :: s1, s2, s3  ! (unused here)
    ! write records (giving sizes for the allocation for arrays)
    write(unit, iostat=iostat, iomsg=iomsg) mesh%nod2D
    write(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_area
    write(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_areawithcav
    write(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D
    write(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D_in
    write(unit, iostat=iostat, iomsg=iomsg) mesh%elem2D
    call write_bin_array(mesh%elem2D_nodes, unit, iostat, iomsg)
    call write_bin_array(mesh%edges, unit, iostat, iomsg)
    call write_bin_array(mesh%edge_tri, unit, iostat, iomsg)
    call write_bin_array(mesh%elem_edges, unit, iostat, iomsg)
    call write_bin_array(mesh%elem_area, unit, iostat, iomsg)
    call write_bin_array(mesh%edge_dxdy, unit, iostat, iomsg)

    call write_bin_array(mesh%edge_cross_dxdy, unit, iostat, iomsg)
    call write_bin_array(mesh%elem_cos, unit, iostat, iomsg)
    call write_bin_array(mesh%metric_factor, unit, iostat, iomsg)
    call write_bin_array(mesh%elem_neighbors, unit, iostat, iomsg)
    call write_bin_array(mesh%nod_in_elem2D, unit, iostat, iomsg)
    call write_bin_array(mesh%x_corners, unit, iostat, iomsg)
    call write_bin_array(mesh%y_corners, unit, iostat, iomsg)
    call write_bin_array(mesh%nod_in_elem2D_num, unit, iostat, iomsg)
    call write_bin_array(mesh%depth, unit, iostat, iomsg)
    call write_bin_array(mesh%gradient_vec, unit, iostat, iomsg)
    call write_bin_array(mesh%gradient_sca, unit, iostat, iomsg)
    call write_bin_array(mesh%bc_index_nod2D, unit, iostat, iomsg)

    write(unit, iostat=iostat, iomsg=iomsg) mesh%nl

    call write_bin_array(mesh%zbar, unit, iostat, iomsg)
    call write_bin_array(mesh%Z, unit, iostat, iomsg)
    call write_bin_array(mesh%elem_depth, unit, iostat, iomsg)
    call write_bin_array(mesh%ulevels, unit, iostat, iomsg)
    call write_bin_array(mesh%ulevels_nod2D, unit, iostat, iomsg)
    call write_bin_array(mesh%ulevels_nod2D_max, unit, iostat, iomsg)
    call write_bin_array(mesh%nlevels, unit, iostat, iomsg)
    call write_bin_array(mesh%nlevels_nod2D, unit, iostat, iomsg)
    call write_bin_array(mesh%nlevels_nod2D_min, unit, iostat, iomsg)
    call write_bin_array(mesh%area, unit, iostat, iomsg)
    call write_bin_array(mesh%area_inv, unit, iostat, iomsg)
    call write_bin_array(mesh%areasvol, unit, iostat, iomsg)
    call write_bin_array(mesh%areasvol_inv, unit, iostat, iomsg)
    call write_bin_array(mesh%mesh_resolution, unit, iostat, iomsg)

    call write_bin_array(mesh%cavity_flag_n, unit, iostat, iomsg)
    call write_bin_array(mesh%cavity_flag_e, unit, iostat, iomsg)
    call write_bin_array(mesh%cavity_depth, unit, iostat, iomsg)
    call write_bin_array(mesh%cavity_nrst_cavlpnt_xyz, unit, iostat, iomsg)

    write(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%dim
    write(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%nza

    call write_bin_array(mesh%ssh_stiff%rowptr, unit, iostat, iomsg)
    call write_bin_array(mesh%ssh_stiff%colind, unit, iostat, iomsg)
    call write_bin_array(mesh%ssh_stiff%values, unit, iostat, iomsg)
    call write_bin_array(mesh%ssh_stiff%colind_loc, unit, iostat, iomsg)
    call write_bin_array(mesh%ssh_stiff%rowptr_loc, unit, iostat, iomsg)

    call write_bin_array(mesh%lump2d_south, unit, iostat, iomsg)
    call write_bin_array(mesh%lump2d_north, unit, iostat, iomsg)
    call write_bin_array(mesh%ind_south, unit, iostat, iomsg)
    call write_bin_array(mesh%ind_north, unit, iostat, iomsg)
    write(unit, iostat=iostat, iomsg=iomsg) mesh%nn_size
    call write_bin_array(mesh%nn_num, unit, iostat, iomsg)
    call write_bin_array(mesh%nn_pos, unit, iostat, iomsg)
    call write_bin_array(mesh%hnode, unit, iostat, iomsg)
    call write_bin_array(mesh%hnode_new, unit, iostat, iomsg)
    call write_bin_array(mesh%zbar_3d_n, unit, iostat, iomsg)
    call write_bin_array(mesh%Z_3d_n, unit, iostat, iomsg)
    call write_bin_array(mesh%helem, unit, iostat, iomsg)
    call write_bin_array(mesh%bottom_elem_thickness, unit, iostat, iomsg)
    call write_bin_array(mesh%bottom_node_thickness, unit, iostat, iomsg)
    call write_bin_array(mesh%dhe, unit, iostat, iomsg)
    call write_bin_array(mesh%hbar, unit, iostat, iomsg)
    call write_bin_array(mesh%hbar_old, unit, iostat, iomsg)
    call write_bin_array(mesh%zbar_n, unit, iostat, iomsg)
    call write_bin_array(mesh%Z_n, unit, iostat, iomsg)
    call write_bin_array(mesh%zbar_n_bot, unit, iostat, iomsg)
    call write_bin_array(mesh%zbar_e_bot, unit, iostat, iomsg)
    call write_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg)
    call write_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg)
!   call write_bin_array(mesh%representative_checksum, unit, iostat, iomsg)
end subroutine write_t_mesh

! Unformatted reading for t_mesh.
! Mirrors write_t_mesh record for record; arrays are allocated inside
! read_bin_array from the stored sizes.
subroutine read_t_mesh(mesh, unit, iostat, iomsg)
    IMPLICIT NONE
    class(t_mesh),  intent(inout) :: mesh
    integer,        intent(in)    :: unit    ! unformatted unit, already open
    integer,        intent(out)   :: iostat  ! status of the last read
    character(*),   intent(inout) :: iomsg   ! error message of the last read
    integer                       :: i, j, k     ! (unused here)
    integer                       :: s1, s2, s3  ! (unused here)
    ! read records (sizes for the array allocations come first in each record)
    read(unit, iostat=iostat, iomsg=iomsg) mesh%nod2D
    read(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_area
    read(unit, iostat=iostat, iomsg=iomsg) mesh%ocean_areawithcav
    read(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D
    read(unit, iostat=iostat, iomsg=iomsg) mesh%edge2D_in
    read(unit, iostat=iostat, iomsg=iomsg) mesh%elem2D

    call read_bin_array(mesh%elem2D_nodes, unit, iostat, iomsg)
    call read_bin_array(mesh%edges, unit, iostat, iomsg)
    call read_bin_array(mesh%edge_tri, unit, iostat, iomsg)
    call read_bin_array(mesh%elem_edges, unit, iostat, iomsg)
    call read_bin_array(mesh%elem_area, unit, iostat, iomsg)
    call read_bin_array(mesh%edge_dxdy, unit, iostat, iomsg)

    call read_bin_array(mesh%edge_cross_dxdy, unit, iostat, iomsg)
    call read_bin_array(mesh%elem_cos, unit, iostat, iomsg)
    call read_bin_array(mesh%metric_factor, unit, iostat, iomsg)
    call read_bin_array(mesh%elem_neighbors, unit, iostat, iomsg)
    call read_bin_array(mesh%nod_in_elem2D, unit, iostat, iomsg)
    call read_bin_array(mesh%x_corners, unit, iostat, iomsg)
    call read_bin_array(mesh%y_corners, unit, iostat, iomsg)
    call read_bin_array(mesh%nod_in_elem2D_num, unit, iostat, iomsg)
    call read_bin_array(mesh%depth, unit, iostat, iomsg)
    call read_bin_array(mesh%gradient_vec, unit, iostat, iomsg)
    call read_bin_array(mesh%gradient_sca, unit, iostat, iomsg)
    call read_bin_array(mesh%bc_index_nod2D, unit, iostat, iomsg)

    read(unit, iostat=iostat, iomsg=iomsg) mesh%nl

    call read_bin_array(mesh%zbar, unit, iostat, iomsg)
    call read_bin_array(mesh%Z, unit, iostat, iomsg)
    call read_bin_array(mesh%elem_depth, unit, iostat, iomsg)
    call read_bin_array(mesh%ulevels, unit, iostat, iomsg)
    call read_bin_array(mesh%ulevels_nod2D, unit, iostat, iomsg)
    call read_bin_array(mesh%ulevels_nod2D_max, unit, iostat, iomsg)
    call read_bin_array(mesh%nlevels, unit, iostat, iomsg)
    call read_bin_array(mesh%nlevels_nod2D, unit, iostat, iomsg)
    call read_bin_array(mesh%nlevels_nod2D_min, unit, iostat, iomsg)
    call read_bin_array(mesh%area, unit, iostat, iomsg)
    call read_bin_array(mesh%area_inv, unit, iostat, iomsg)
    call read_bin_array(mesh%areasvol, unit, iostat, iomsg)
    call read_bin_array(mesh%areasvol_inv, unit, iostat, iomsg)
    call read_bin_array(mesh%mesh_resolution, unit, iostat, iomsg)

    call read_bin_array(mesh%cavity_flag_n, unit, iostat, iomsg)
    call read_bin_array(mesh%cavity_flag_e, unit, iostat, iomsg)
    call read_bin_array(mesh%cavity_depth, unit, iostat, iomsg)
    call read_bin_array(mesh%cavity_nrst_cavlpnt_xyz, unit, iostat, iomsg)

    read(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%dim
    read(unit, iostat=iostat, iomsg=iomsg) mesh%ssh_stiff%nza

    call read_bin_array(mesh%ssh_stiff%rowptr, unit, iostat, iomsg)
    call read_bin_array(mesh%ssh_stiff%colind, unit, iostat, iomsg)
    call read_bin_array(mesh%ssh_stiff%values, unit, iostat, iomsg)
    call read_bin_array(mesh%ssh_stiff%colind_loc, unit, iostat, iomsg)
    call read_bin_array(mesh%ssh_stiff%rowptr_loc, unit, iostat, iomsg)

    call read_bin_array(mesh%lump2d_south, unit, iostat, iomsg)
    call read_bin_array(mesh%lump2d_north, unit, iostat, iomsg)
    call read_bin_array(mesh%ind_south, unit, iostat, iomsg)
    call read_bin_array(mesh%ind_north, unit, iostat, iomsg)
    read(unit, iostat=iostat, iomsg=iomsg) mesh%nn_size
    call read_bin_array(mesh%nn_num, unit, iostat, iomsg)
    call read_bin_array(mesh%nn_pos, unit, iostat, iomsg)
    call read_bin_array(mesh%hnode, unit, iostat, iomsg)
    call read_bin_array(mesh%hnode_new, unit, iostat, iomsg)
    call read_bin_array(mesh%zbar_3d_n, unit, iostat, iomsg)
    call read_bin_array(mesh%Z_3d_n, unit, iostat, iomsg)
    call read_bin_array(mesh%helem, unit, iostat, iomsg)
    call read_bin_array(mesh%bottom_elem_thickness, unit, iostat, iomsg)
    call read_bin_array(mesh%bottom_node_thickness, unit, iostat, iomsg)
    call read_bin_array(mesh%dhe, unit, iostat, iomsg)
+ call read_bin_array(mesh%hbar, unit, iostat, iomsg) + call read_bin_array(mesh%hbar_old, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n, unit, iostat, iomsg) + call read_bin_array(mesh%Z_n, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n_bot, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_e_bot, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_n_srf, unit, iostat, iomsg) + call read_bin_array(mesh%zbar_e_srf, unit, iostat, iomsg) +! call read_bin_array(mesh%representative_checksum, unit, iostat, iomsg) +end subroutine read_t_mesh +end module MOD_MESH +!========================================================== + diff --git a/src/temp/MOD_PARTIT.F90 b/src/temp/MOD_PARTIT.F90 new file mode 100644 index 000000000..bd3b7dec2 --- /dev/null +++ b/src/temp/MOD_PARTIT.F90 @@ -0,0 +1,189 @@ +!========================================================== +! Variables to organize parallel work +module MOD_PARTIT +USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE +include 'mpif.h' +integer, parameter :: MAX_LAENDERECK=16 +integer, parameter :: MAX_NEIGHBOR_PARTITIONS=32 + + +type com_struct + integer :: rPEnum ! the number of PE I receive info from + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: rPE ! their list + integer, dimension(MAX_NEIGHBOR_PARTITIONS+1) :: rptr ! allocatables to the list of nodes + integer, dimension(:), allocatable :: rlist ! the list of nodes + integer :: sPEnum ! send part + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sPE + integer, dimension(MAX_NEIGHBOR_PARTITIONS) :: sptr + integer, dimension(:), allocatable :: slist + integer, dimension(:), allocatable :: req ! request for MPI_Wait + integer :: nreq ! number of requests for MPI_Wait + ! 
                                                 ! (to combine halo exchange of several fields)
    contains
    procedure WRITE_T_COM_STRUCT
    procedure READ_T_COM_STRUCT
    generic :: write(unformatted) => WRITE_T_COM_STRUCT
    generic :: read(unformatted)  => READ_T_COM_STRUCT
end type com_struct

! T_PARTIT: everything a process needs to know about the mesh partition --
! communicators, halo-exchange descriptors and local-to-global index lists.
TYPE T_PARTIT
    integer           :: MPI_COMM_FESOM ! FESOM communicator (for ocean-only runs it is often a copy of MPI_COMM_WORLD)

    type(com_struct)  :: com_nod2D
    type(com_struct)  :: com_elem2D
    type(com_struct)  :: com_elem2D_full

    ! MPI Datatypes for interface exchange
    ! Element fields (2D; 2D integer; 3D with nl-1 or nl levels, 1 - 4 values)
    ! small halo and / or full halo
    !!! s(r)_mpitype_* are constructed during the runtime and shall not be dumped!!!
    integer, allocatable :: s_mpitype_elem2D(:,:),        r_mpitype_elem2D(:,:)
    integer, allocatable :: s_mpitype_elem2D_full_i(:),   r_mpitype_elem2D_full_i(:)
    integer, allocatable :: s_mpitype_elem2D_full(:,:),   r_mpitype_elem2D_full(:,:)
    integer, allocatable :: s_mpitype_elem3D(:,:,:),      r_mpitype_elem3D(:,:,:)
    integer, allocatable :: s_mpitype_elem3D_full(:,:,:), r_mpitype_elem3D_full(:,:,:)

    ! Nodal fields (2D; 2D integer; 3D with nl-1 or nl levels, one, two, or three values)
    integer, allocatable :: s_mpitype_nod2D(:),   r_mpitype_nod2D(:)
    integer, allocatable :: s_mpitype_nod2D_i(:), r_mpitype_nod2D_i(:)
    integer, allocatable :: s_mpitype_nod3D(:,:,:), r_mpitype_nod3D(:,:,:)

    ! general MPI part
    integer            :: MPIERR
    integer            :: npes
    integer            :: mype
    integer            :: maxPEnum=100
    integer, allocatable, dimension(:)  :: part

    ! Mesh partition: owned (myDim) and halo (eDim) counts plus global index lists
    integer                             :: myDim_nod2D, eDim_nod2D
    integer, allocatable, dimension(:)  :: myList_nod2D
    integer                             :: myDim_elem2D, eDim_elem2D, eXDim_elem2D
    integer, allocatable, dimension(:)  :: myList_elem2D
    integer                             :: myDim_edge2D, eDim_edge2D
    integer, allocatable, dimension(:)  :: myList_edge2D

    integer                             :: pe_status = 0 ! if /=0 then something is wrong
    !!! remPtr_* are constructed during the runtime and shall not be dumped!!!
    integer, allocatable :: remPtr_nod2D(:),  remList_nod2D(:)
    integer, allocatable :: remPtr_elem2D(:), remList_elem2D(:)

    logical :: elem_full_flag
    contains
    procedure WRITE_T_PARTIT
    procedure READ_T_PARTIT
    generic :: write(unformatted) => WRITE_T_PARTIT
    generic :: read(unformatted)  => READ_T_PARTIT
END TYPE T_PARTIT
contains

! Unformatted writing for COM_STRUCT TYPE.
! Static-size arrays use write1d_int_static (length record + data);
! allocatables use write_bin_array. Order must match READ_T_COM_STRUCT.
subroutine WRITE_T_COM_STRUCT(tstruct, unit, iostat, iomsg)
    IMPLICIT NONE
    class(COM_STRUCT), intent(in)    :: tstruct
    integer,           intent(in)    :: unit    ! unformatted unit, already open
    integer,           intent(out)   :: iostat  ! status of the last write
    character(*),      intent(inout) :: iomsg   ! error message of the last write

    write(unit, iostat=iostat, iomsg=iomsg) tstruct%rPEnum
    call write1d_int_static(tstruct%rPE,  unit, iostat, iomsg)
    call write1d_int_static(tstruct%rptr, unit, iostat, iomsg)
    call write_bin_array(tstruct%rlist, unit, iostat, iomsg)
    write(unit, iostat=iostat, iomsg=iomsg) tstruct%sPEnum
    call write1d_int_static(tstruct%sPE,  unit, iostat, iomsg)
    call write1d_int_static(tstruct%sptr, unit, iostat, iomsg)
    call write_bin_array(tstruct%slist, unit, iostat, iomsg)
    ! req is constructed during the runtime
    ! call write_bin_array(tstruct%req, unit, iostat, iomsg)
    write(unit, iostat=iostat, iomsg=iomsg) tstruct%nreq
end subroutine WRITE_T_COM_STRUCT

! Unformatted reading for COM_STRUCT TYPE; mirrors WRITE_T_COM_STRUCT.
subroutine READ_T_COM_STRUCT(tstruct, unit, iostat, iomsg)
    IMPLICIT NONE
    class(COM_STRUCT), intent(inout) :: tstruct
    integer,           intent(in)    :: unit    ! unformatted unit, already open
    integer,           intent(out)   :: iostat  ! status of the last read
    character(*),      intent(inout) :: iomsg   ! error message of the last read

    read(unit, iostat=iostat, iomsg=iomsg) tstruct%rPEnum
    call read1d_int_static(tstruct%rPE,  unit, iostat, iomsg)
    call read1d_int_static(tstruct%rptr, unit, iostat, iomsg)
    call read_bin_array(tstruct%rlist, unit, iostat, iomsg)
    read(unit, iostat=iostat, iomsg=iomsg) tstruct%sPEnum
    call read1d_int_static(tstruct%sPE,  unit, iostat, iomsg)
    call read1d_int_static(tstruct%sptr, unit, iostat, iomsg)
    call read_bin_array(tstruct%slist, unit, iostat, iomsg)
!   req is constructed during the runtime
!   call read_bin_array(tstruct%req, unit, iostat, iomsg)
    read(unit, iostat=iostat, iomsg=iomsg) tstruct%nreq
end subroutine READ_T_COM_STRUCT

! Unformatted writing for T_PARTIT.
! Runtime-only members (s/r_mpitype_*, remPtr_*/remList_*) are deliberately
! excluded from the dump.
! NOTE(review): MPI_COMM_FESOM, MPIERR and elem_full_flag are also not
! serialized -- confirm they are re-established at restart.
subroutine WRITE_T_PARTIT(partit, unit, iostat, iomsg)
    IMPLICIT NONE
    class(T_PARTIT), intent(in)    :: partit
    integer,         intent(in)    :: unit    ! unformatted unit, already open
    integer,         intent(out)   :: iostat  ! status of the last write
    character(*),    intent(inout) :: iomsg   ! error message of the last write

    ! com structs use their own unformatted-write procedure (WRITE_T_COM_STRUCT)
    write(unit, iostat=iostat, iomsg=iomsg) partit%com_nod2D
    write(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D
    write(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D_full

    write(unit, iostat=iostat, iomsg=iomsg) partit%npes
    write(unit, iostat=iostat, iomsg=iomsg) partit%mype
    write(unit, iostat=iostat, iomsg=iomsg) partit%maxPEnum
    call write_bin_array(partit%part, unit, iostat, iomsg)

    write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_nod2D
    write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_nod2D
    call write_bin_array(partit%myList_nod2D, unit, iostat, iomsg)

    write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_elem2D
    write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_elem2D
    write(unit, iostat=iostat, iomsg=iomsg) partit%eXDim_elem2D
    call write_bin_array(partit%myList_elem2D, unit, iostat, iomsg)

    write(unit, iostat=iostat, iomsg=iomsg) partit%myDim_edge2D
    write(unit, iostat=iostat, iomsg=iomsg) partit%eDim_edge2D
    call write_bin_array(partit%myList_edge2D, unit, iostat, iomsg)
    write(unit, iostat=iostat, iomsg=iomsg) partit%pe_status
end subroutine WRITE_T_PARTIT
!
Unformatted reading for T_PARTIT +subroutine READ_T_PARTIT(partit, unit, iostat, iomsg) + IMPLICIT NONE + class(T_PARTIT), intent(inout) :: partit + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + + read(unit, iostat=iostat, iomsg=iomsg) partit%com_nod2D + read(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%com_elem2D_full + + read(unit, iostat=iostat, iomsg=iomsg) partit%npes + read(unit, iostat=iostat, iomsg=iomsg) partit%mype + read(unit, iostat=iostat, iomsg=iomsg) partit%maxPEnum + call read_bin_array(partit%part, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_nod2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_nod2D + call read_bin_array(partit%myList_nod2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_elem2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eXDim_elem2D + call read_bin_array(partit%myList_elem2D, unit, iostat, iomsg) + + read(unit, iostat=iostat, iomsg=iomsg) partit%myDim_edge2D + read(unit, iostat=iostat, iomsg=iomsg) partit%eDim_edge2D + call read_bin_array(partit%myList_edge2D, unit, iostat, iomsg) + read(unit, iostat=iostat, iomsg=iomsg) partit%pe_status +end subroutine READ_T_PARTIT + +end module MOD_PARTIT diff --git a/src/temp/MOD_READ_BINARY_ARRAYS.F90 b/src/temp/MOD_READ_BINARY_ARRAYS.F90 new file mode 100644 index 000000000..87f0b2389 --- /dev/null +++ b/src/temp/MOD_READ_BINARY_ARRAYS.F90 @@ -0,0 +1,118 @@ +!========================================================== +! +!------------------------------------------------------------------------------------------ +! 
! useful interface (read_bin_array) for reading arbitrary binary arrays from an opened file
!
! Each record was written as: size(s) first, then the data (see the matching
! MOD_WRITE_BINARY_ARRAYS). A stored size of 0 marks an array that was
! unallocated at write time; the array is then left unallocated here.
MODULE MOD_READ_BINARY_ARRAYS
use o_PARAM
implicit none
private
public :: read_bin_array, read1d_int_static
INTERFACE read_bin_array
   MODULE PROCEDURE read1d_real, read1d_int, read1d_char, read2d_real, read2d_int, read3d_real, read3d_int
END INTERFACE
contains

! Read a 1D real(WP) allocatable array: size record, then data.
subroutine read1d_real(arr, unit, iostat, iomsg)
   real(kind=WP), intent(inout), allocatable :: arr(:)
   integer,       intent(in)    :: unit    ! unformatted unit, already open
   integer,       intent(out)   :: iostat  ! status of the last read
   character(*),  intent(inout) :: iomsg   ! error message of the last read
   integer :: s1

   read(unit, iostat=iostat, iomsg=iomsg) s1
   ! Robustness fix: on a failed size read s1 is undefined -- bail out instead
   ! of allocating with garbage.
   if (iostat /= 0) return
   if (s1==0) return            ! array was unallocated when written
   allocate(arr(s1))
   read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1)
end subroutine read1d_real

! Read a 1D integer allocatable array: size record, then data.
subroutine read1d_int(arr, unit, iostat, iomsg)
   integer,      intent(inout), allocatable :: arr(:)
   integer,      intent(in)    :: unit
   integer,      intent(out)   :: iostat
   character(*), intent(inout) :: iomsg
   integer :: s1

   read(unit, iostat=iostat, iomsg=iomsg) s1
   if (iostat /= 0) return      ! see read1d_real
   if (s1==0) return
   allocate(arr(s1))
   read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1)
end subroutine read1d_int

! Read a 1D character allocatable array: size record, then data.
subroutine read1d_char(arr, unit, iostat, iomsg)
   character,    intent(inout), allocatable :: arr(:)
   integer,      intent(in)    :: unit
   integer,      intent(out)   :: iostat
   character(*), intent(inout) :: iomsg
   integer :: s1

   read(unit, iostat=iostat, iomsg=iomsg) s1
   if (iostat /= 0) return      ! see read1d_real
   if (s1==0) return
   allocate(arr(s1))
   read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1)
end subroutine read1d_char

! Read into a FIXED-SIZE integer array (e.g. the static rPE/rptr members of
! com_struct): only the first s1 elements are filled.
! NOTE(review): assumes s1 <= size(arr); the matching writer records a size
! that fits -- confirm for foreign/corrupted dumps.
subroutine read1d_int_static(arr, unit, iostat, iomsg)
   IMPLICIT NONE
   integer,      intent(inout) :: arr(:)
   integer,      intent(in)    :: unit
   integer,      intent(out)   :: iostat
   character(*), intent(inout) :: iomsg
   integer :: s1

   read(unit, iostat=iostat, iomsg=iomsg) s1
   if (iostat /= 0) return      ! see read1d_real
   if (s1==0) return
   read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1)
end subroutine read1d_int_static

! Read a 2D real(WP) allocatable array: two sizes, then data.
subroutine read2d_real(arr, unit, iostat, iomsg)
   real(kind=WP), intent(inout), allocatable :: arr(:,:)
   integer,       intent(in)    :: unit
   integer,       intent(out)   :: iostat
   character(*),  intent(inout) :: iomsg
   integer :: s1, s2

   read(unit, iostat=iostat, iomsg=iomsg) s1, s2
   if (iostat /= 0) return      ! see read1d_real
   if ((s1==0) .or. (s2==0)) return
   allocate(arr(s1, s2))
   read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2)
end subroutine read2d_real

! Read a 2D integer allocatable array: two sizes, then data.
subroutine read2d_int(arr, unit, iostat, iomsg)
   integer,      intent(inout), allocatable :: arr(:,:)
   integer,      intent(in)    :: unit
   integer,      intent(out)   :: iostat
   character(*), intent(inout) :: iomsg
   integer :: s1, s2

   read(unit, iostat=iostat, iomsg=iomsg) s1, s2
   if (iostat /= 0) return      ! see read1d_real
   if ((s1==0) .or. (s2==0)) return
   allocate(arr(s1, s2))
   read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2)
end subroutine read2d_int

! Read a 3D real(WP) allocatable array: three sizes, then data.
subroutine read3d_real(arr, unit, iostat, iomsg)
   real(kind=WP), intent(inout), allocatable :: arr(:,:,:)
   integer,       intent(in)    :: unit
   integer,       intent(out)   :: iostat
   character(*),  intent(inout) :: iomsg
   integer :: s1, s2, s3

   read(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3
   if (iostat /= 0) return      ! see read1d_real
   if ((s1==0) .or. (s2==0) .or. (s3==0)) return
   allocate(arr(s1,s2,s3))
   read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3)
end subroutine read3d_real

! Read a 3D integer allocatable array: three sizes, then data.
subroutine read3d_int(arr, unit, iostat, iomsg)
   integer,      intent(inout), allocatable :: arr(:,:,:)
   integer,      intent(in)    :: unit
   integer,      intent(out)   :: iostat
   character(*), intent(inout) :: iomsg
   integer :: s1, s2, s3

   read(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3
   if (iostat /= 0) return      ! see read1d_real
   if ((s1==0) .or. (s2==0) .or. &
(s3==0)) return + allocate(arr(s1,s2,s3)) + read(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) +end subroutine read3d_int +end module MOD_READ_BINARY_ARRAYS +!========================================================== + diff --git a/src/temp/MOD_TRACER.F90 b/src/temp/MOD_TRACER.F90 new file mode 100644 index 000000000..8e8247830 --- /dev/null +++ b/src/temp/MOD_TRACER.F90 @@ -0,0 +1,228 @@ +!========================================================== +MODULE MOD_TRACER +USE O_PARAM +USE, intrinsic :: ISO_FORTRAN_ENV +USE MOD_WRITE_BINARY_ARRAYS +USE MOD_READ_BINARY_ARRAYS +IMPLICIT NONE +SAVE + +TYPE T_TRACER_DATA +real(kind=WP), allocatable, dimension(:,:) :: values, valuesAB ! instant values & Adams-Bashfort interpolation +logical :: smooth_bh_tra=.false. +real(kind=WP) :: gamma0_tra, gamma1_tra, gamma2_tra +logical :: i_vert_diff =.false. +character(20) :: tra_adv_hor, tra_adv_ver, tra_adv_lim ! type of the advection scheme for this tracer +real(kind=WP) :: tra_adv_ph = 1. ! a parameter to be used in horizontal advection (for MUSCL it is the fraction of fourth-order contribution in the solution) +real(kind=WP) :: tra_adv_pv = 1. ! a parameter to be used in horizontal advection (for QR4C it is the fraction of fourth-order contribution in the solution) +integer :: ID + +contains + procedure WRITE_T_TRACER_DATA + procedure READ_T_TRACER_DATA + generic :: write(unformatted) => WRITE_T_TRACER_DATA + generic :: read(unformatted) => READ_T_TRACER_DATA +END TYPE T_TRACER_DATA + + +TYPE T_TRACER_WORK +!auxuary arrays to work with tracers: +real(kind=WP), allocatable :: del_ttf(:,:) +real(kind=WP), allocatable :: del_ttf_advhoriz(:,:),del_ttf_advvert(:,:) +!_______________________________________________________________________________ +! in case ldiag_DVD=.true. --> calculate discrete variance decay (DVD) +real(kind=WP), allocatable :: tr_dvd_horiz(:,:,:), tr_dvd_vert(:,:,:) +! The fct part +real(kind=WP),allocatable,dimension(:,:) :: fct_LO ! 
Low-order solution +real(kind=WP),allocatable,dimension(:,:) :: adv_flux_hor ! Antidif. horiz. contrib. from edges / backup for iterafive fct scheme +real(kind=WP),allocatable,dimension(:,:) :: adv_flux_ver ! Antidif. vert. fluxes from nodes / backup for iterafive fct scheme + +real(kind=WP),allocatable,dimension(:,:) :: fct_ttf_max,fct_ttf_min +real(kind=WP),allocatable,dimension(:,:) :: fct_plus,fct_minus +! MUSCL type reconstruction +integer,allocatable,dimension(:) :: nboundary_lay +integer,allocatable,dimension(:,:) :: edge_up_dn_tri +real(kind=WP),allocatable,dimension(:,:,:) :: edge_up_dn_grad + +contains + procedure WRITE_T_TRACER_WORK + procedure READ_T_TRACER_WORK + generic :: write(unformatted) => WRITE_T_TRACER_WORK + generic :: read(unformatted) => READ_T_TRACER_WORK +END TYPE T_TRACER_WORK + +! auxury type for reading namelist.tra +TYPE NML_TRACER_LIST_TYPE + INTEGER :: ID =-1 + CHARACTER(len=4) :: adv_hor ='NONE' + CHARACTER(len=4) :: adv_ver ='NONE' + CHARACTER(len=4) :: adv_lim ='NONE' + REAL(kind=WP) :: adv_ph =1. + REAL(kind=WP) :: adv_pv =1. +END TYPE NML_TRACER_LIST_TYPE + +TYPE T_TRACER +! total number of tracers: +integer :: num_tracers=2 +type(t_tracer_data), allocatable :: data(:) +type(t_tracer_work) :: work +! general options for all tracers (can be moved to T_TRACER is needed) +! bharmonic diffusion for tracers. We recommend to use this option in very high resolution runs (Redi is generally off there). +logical :: smooth_bh_tra = .false. +real(kind=WP) :: gamma0_tra = 0.0005 +real(kind=WP) :: gamma1_tra = 0.0125 +real(kind=WP) :: gamma2_tra = 0. +logical :: i_vert_diff = .true. + +contains +procedure WRITE_T_TRACER +procedure READ_T_TRACER +generic :: write(unformatted) => WRITE_T_TRACER +generic :: read(unformatted) => READ_T_TRACER +END TYPE T_TRACER + +contains + +! 
Unformatted writing for T_TRACER_DATA
+! Serializes one T_TRACER_DATA record to an unformatted unit: the two
+! value arrays first (write_bin_array also records the array sizes, so
+! the reader can re-allocate), then the scalar diffusion/advection
+! settings and the tracer ID. Field order must stay in lock-step with
+! READ_T_TRACER_DATA.
+! NOTE(review): iostat/iomsg are overwritten per write; only the last
+! operation's status reaches the caller.
+subroutine WRITE_T_TRACER_DATA(tdata, unit, iostat, iomsg)
+  IMPLICIT NONE
+  class(T_TRACER_DATA), intent(in) :: tdata   ! tracer record to serialize
+  integer, intent(in) :: unit                 ! opened unformatted unit
+  integer, intent(out) :: iostat              ! status of the last write
+  character(*), intent(inout) :: iomsg        ! message of the last write
+
+  call write_bin_array(tdata%values, unit, iostat, iomsg)
+  call write_bin_array(tdata%valuesAB, unit, iostat, iomsg)
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%smooth_bh_tra
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma0_tra
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma1_tra
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%gamma2_tra
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%i_vert_diff
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_hor
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ver
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_lim
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ph
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_pv
+  write(unit, iostat=iostat, iomsg=iomsg) tdata%ID
+end subroutine WRITE_T_TRACER_DATA
+
+! 
Unformatted reading for T_TRACER_DATA
+! Deserializes one T_TRACER_DATA record, mirroring WRITE_T_TRACER_DATA
+! field-for-field: the two value arrays (read_bin_array reads the stored
+! sizes and allocates accordingly), then the scalar settings and the ID.
+! NOTE(review): iostat/iomsg reflect only the final read; earlier
+! failures are not propagated individually.
+subroutine READ_T_TRACER_DATA(tdata, unit, iostat, iomsg)
+  IMPLICIT NONE
+  class(T_TRACER_DATA), intent(inout) :: tdata  ! tracer record to populate
+  integer, intent(in) :: unit                   ! opened unformatted unit
+  integer, intent(out) :: iostat                ! status of the last read
+  character(*), intent(inout) :: iomsg          ! message of the last read
+
+  call read_bin_array(tdata%values, unit, iostat, iomsg)
+  call read_bin_array(tdata%valuesAB, unit, iostat, iomsg)
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%smooth_bh_tra
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma0_tra
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma1_tra
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%gamma2_tra
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%i_vert_diff
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_hor
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ver
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_lim
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_ph
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%tra_adv_pv
+  read(unit, iostat=iostat, iomsg=iomsg) tdata%ID
+end subroutine READ_T_TRACER_DATA
+
+! 
Unformatted writing for T_TRACER_WORK
+! Serializes all work arrays of a T_TRACER_WORK object via write_bin_array
+! (each call stores the array sizes, and writes nothing but a zero size for
+! unallocated arrays -- see MOD_WRITE_BINARY_ARRAYS). Array order must stay
+! in lock-step with READ_T_TRACER_WORK.
+subroutine WRITE_T_TRACER_WORK(twork, unit, iostat, iomsg)
+  IMPLICIT NONE
+  class(T_TRACER_WORK), intent(in) :: twork   ! work-array container to serialize
+  integer, intent(in) :: unit                 ! opened unformatted unit
+  integer, intent(out) :: iostat              ! status of the last write
+  character(*), intent(inout) :: iomsg        ! message of the last write
+
+  ! tendency terms
+  call write_bin_array(twork%del_ttf, unit, iostat, iomsg)
+  call write_bin_array(twork%del_ttf_advhoriz, unit, iostat, iomsg)
+  call write_bin_array(twork%del_ttf_advvert, unit, iostat, iomsg)
+  ! discrete variance decay diagnostics
+  call write_bin_array(twork%tr_dvd_horiz, unit, iostat, iomsg)
+  call write_bin_array(twork%tr_dvd_vert, unit, iostat, iomsg)
+  ! FCT scheme arrays
+  call write_bin_array(twork%fct_LO, unit, iostat, iomsg)
+  call write_bin_array(twork%adv_flux_hor, unit, iostat, iomsg)
+  call write_bin_array(twork%adv_flux_ver, unit, iostat, iomsg)
+  call write_bin_array(twork%fct_ttf_max, unit, iostat, iomsg)
+  call write_bin_array(twork%fct_ttf_min, unit, iostat, iomsg)
+  call write_bin_array(twork%fct_plus, unit, iostat, iomsg)
+  call write_bin_array(twork%fct_minus, unit, iostat, iomsg)
+  ! MUSCL reconstruction arrays
+  call write_bin_array(twork%nboundary_lay, unit, iostat, iomsg)
+  call write_bin_array(twork%edge_up_dn_tri, unit, iostat, iomsg)
+  call write_bin_array(twork%edge_up_dn_grad, unit, iostat, iomsg)
+end subroutine WRITE_T_TRACER_WORK
+
+! 
Unformatted reading for T_TRACER_WORK
+! Deserializes all work arrays of a T_TRACER_WORK object, mirroring
+! WRITE_T_TRACER_WORK array-for-array. read_bin_array reads the stored
+! sizes and allocates each array; arrays stored with size zero are left
+! unallocated (see MOD_READ_BINARY_ARRAYS).
+subroutine READ_T_TRACER_WORK(twork, unit, iostat, iomsg)
+  IMPLICIT NONE
+  class(T_TRACER_WORK), intent(inout) :: twork  ! work-array container to populate
+  integer, intent(in) :: unit                   ! opened unformatted unit
+  integer, intent(out) :: iostat                ! status of the last read
+  character(*), intent(inout) :: iomsg          ! message of the last read
+
+  ! tendency terms
+  call read_bin_array(twork%del_ttf, unit, iostat, iomsg)
+  call read_bin_array(twork%del_ttf_advhoriz, unit, iostat, iomsg)
+  call read_bin_array(twork%del_ttf_advvert, unit, iostat, iomsg)
+  ! discrete variance decay diagnostics
+  call read_bin_array(twork%tr_dvd_horiz, unit, iostat, iomsg)
+  call read_bin_array(twork%tr_dvd_vert, unit, iostat, iomsg)
+  ! FCT scheme arrays
+  call read_bin_array(twork%fct_LO, unit, iostat, iomsg)
+  call read_bin_array(twork%adv_flux_hor, unit, iostat, iomsg)
+  call read_bin_array(twork%adv_flux_ver, unit, iostat, iomsg)
+  call read_bin_array(twork%fct_ttf_max, unit, iostat, iomsg)
+  call read_bin_array(twork%fct_ttf_min, unit, iostat, iomsg)
+  call read_bin_array(twork%fct_plus, unit, iostat, iomsg)
+  call read_bin_array(twork%fct_minus, unit, iostat, iomsg)
+  ! MUSCL reconstruction arrays
+  call read_bin_array(twork%nboundary_lay, unit, iostat, iomsg)
+  call read_bin_array(twork%edge_up_dn_tri, unit, iostat, iomsg)
+  call read_bin_array(twork%edge_up_dn_grad, unit, iostat, iomsg)
+end subroutine READ_T_TRACER_WORK
+
+! 
Unformatted writing for T_TRACER +subroutine WRITE_T_TRACER(tracer, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER), intent(in) :: tracer + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + + write(unit, iostat=iostat, iomsg=iomsg) tracer%num_tracers + do i=1, tracer%num_tracers + write(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) + end do + write(unit, iostat=iostat, iomsg=iomsg) tracer%work + write(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra + write(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff +end subroutine WRITE_T_TRACER + +! Unformatted reading for T_TRACER +subroutine READ_T_TRACER(tracer, unit, iostat, iomsg) + IMPLICIT NONE + class(T_TRACER), intent(inout) :: tracer + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: i + + read(unit, iostat=iostat, iomsg=iomsg) tracer%num_tracers +! write(*,*) 'number of tracers to read: ', tracer%num_tracers + allocate(tracer%data(tracer%num_tracers)) + do i=1, tracer%num_tracers + read(unit, iostat=iostat, iomsg=iomsg) tracer%data(i) +! 
write(*,*) 'tracer info:', tracer%data(i)%ID, TRIM(tracer%data(i)%tra_adv_hor), TRIM(tracer%data(i)%tra_adv_ver), TRIM(tracer%data(i)%tra_adv_lim) + end do + read(unit, iostat=iostat, iomsg=iomsg) tracer%work + read(unit, iostat=iostat, iomsg=iomsg) tracer%smooth_bh_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma0_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma1_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%gamma2_tra + read(unit, iostat=iostat, iomsg=iomsg) tracer%i_vert_diff +end subroutine READ_T_TRACER +end module MOD_TRACER +!========================================================== + diff --git a/src/temp/MOD_WRITE_BINARY_ARRAYS.F90 b/src/temp/MOD_WRITE_BINARY_ARRAYS.F90 new file mode 100644 index 000000000..4f03b5cea --- /dev/null +++ b/src/temp/MOD_WRITE_BINARY_ARRAYS.F90 @@ -0,0 +1,160 @@ +!========================================================== +! +!------------------------------------------------------------------------------------------ +! useful interface (write_bin_array) for writing arbitary binary arrays into an opened file +MODULE MOD_WRITE_BINARY_ARRAYS +use o_PARAM +private +public :: write_bin_array, write1d_int_static +INTERFACE write_bin_array + MODULE PROCEDURE write1d_real, write1d_int, write1d_char, write2d_real, write2d_int, write3d_real, write3d_int +END INTERFACE +contains + +subroutine write1d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_real + +subroutine write1d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), 
intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_int + +subroutine write1d_char(arr, unit, iostat, iomsg) + character, intent(in), allocatable :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + if (allocated(arr)) then + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) + else + s1=0 + write(unit, iostat=iostat, iomsg=iomsg) s1 + end if +end subroutine write1d_char + +subroutine write1d_int_static(arr, unit, iostat, iomsg) + IMPLICIT NONE + integer, intent(in) :: arr(:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1 + + s1=size(arr, 1) + write(unit, iostat=iostat, iomsg=iomsg) s1 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1) +end subroutine write1d_int_static + +subroutine write2d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2) + else + s1=0 + s2=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + end if +end subroutine write2d_real + +subroutine write2d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + write(unit, iostat=iostat, iomsg=iomsg) 
arr(1:s1, 1:s2) + else + s1=0 + s2=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2 + end if +end subroutine write2d_int + + +subroutine write3d_real(arr, unit, iostat, iomsg) + real(kind=WP), intent(in), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + s3=size(arr, 3) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) + else + s1=0 + s2=0 + s3=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + end if +end subroutine write3d_real + +subroutine write3d_int(arr, unit, iostat, iomsg) + integer, intent(in), allocatable :: arr(:,:,:) + integer, intent(in) :: unit + integer, intent(out) :: iostat + character(*), intent(inout) :: iomsg + integer :: s1, s2, s3 + + if (allocated(arr)) then + s1=size(arr, 1) + s2=size(arr, 2) + s3=size(arr, 3) + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + write(unit, iostat=iostat, iomsg=iomsg) arr(1:s1, 1:s2, 1:s3) + else + s1=0 + s2=0 + s3=0 + write(unit, iostat=iostat, iomsg=iomsg) s1, s2, s3 + end if +end subroutine write3d_int +end module MOD_WRITE_BINARY_ARRAYS +!========================================================== + diff --git a/src/temp/gen_halo_exchange.F90 b/src/temp/gen_halo_exchange.F90 new file mode 100755 index 000000000..7b9f66e6b --- /dev/null +++ b/src/temp/gen_halo_exchange.F90 @@ -0,0 +1,2381 @@ +! ======================================================================== +! Halo exchange routines + broadcast routines that collect information +! on the entire field (needed for output) +! The routines here are very similar, difference is the data type and +! exchange pattern. +! exchange_nod2D_i(arr(myDim_nod2D+eDim_nod2D)) INTEGER +! exchange_nod2D(arr(myDim_nod2D+eDim_nod2D)) WP +! exchange_nod3D(arr(nl-1,myDim_nod2D+eDim_nod2D)) WP +! 
exchange_nod3D_full(arr(nl,myDim_nod2D+eDim_nod2D)) WP +! exchange_edge2D(edge_array2D) WP not used currently !!! no buffer!!! +! exchange_edge3D(edge_array3D) WP not used currently !!! no buffer!!! +! exchange_elem3D(elem_array3D) WP +! exchange_elem2d_full +! exchange_elem2d_full_i +! ======================================================================== + +module g_comm + + use, intrinsic :: ISO_FORTRAN_ENV + + implicit none + +contains + +#ifdef DEBUG +! General version of the communication routine for 2D nodal fields +! Only needed in debug mode +subroutine check_mpi_comm(rn, sn, r_mpitype, s_mpitype, rPE, sPE, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(in) :: sn, rn, r_mpitype(:), s_mpitype(:), rPE(:), sPE(:) +integer :: n, sdebug, rdebug, status(MPI_STATUS_SIZE), request +#include "associate_part_def.h" +#include "associate_part_ass.h" +DO n=1,rn + CALL MPI_TYPE_SIZE(r_mpitype(n), rdebug, MPIerr) + CALL MPI_ISEND(rdebug, 1, MPI_INTEGER, rPE(n), 10, MPI_COMM_FESOM, request, MPIerr) +END DO +DO n=1, sn + call MPI_RECV(sdebug, 1, MPI_INTEGER, sPE(n), 10, MPI_COMM_FESOM, & + status, MPIerr) + call MPI_TYPE_SIZE(s_mpitype(n), rdebug, MPIerr) + if (sdebug /= rdebug) then + print *, "Mismatching MPI send/recieve message lengths." 
+ print *,"Send/receive process numbers: ", mype, '/', sPE(n) + print *,"Number of send/receive bytes: ", sdebug, '/', rdebug + call MPI_ABORT( MPI_COMM_FESOM, 1 ) + end if +END DO +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +END SUBROUTINE check_mpi_comm +#endif + + +subroutine exchange_nod2D_i(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: nod_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" +if (npes > 1) then + call exchange_nod2D_i_begin(nod_array2D, partit) + call exchange_nod_end(partit) +endif +END SUBROUTINE exchange_nod2D_i + +!============================================================================= +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_i_begin(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: nod_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod2D_i, s_mpitype_nod2D_i, & + com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + + call MPI_IRECV(nod_array2D, 1, r_mpitype_nod2D_i(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(n), MPIerr) + END DO + + DO n=1, sn + + call MPI_ISEND(nod_array2D, 1, s_mpitype_nod2D_i(n), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(rn+n), MPIerr) + END DO + + com_nod2D%nreq = rn+sn + + endif +END SUBROUTINE exchange_nod2D_i_begin + +! ======================================================================== +! 
General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + call exchange_nod2D_begin(nod_array2D, partit) + call exchange_nod_end(partit) + end if + +END SUBROUTINE exchange_nod2D + +! ======================================================================== +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_begin(nod_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod2D, s_mpitype_nod2D, & + com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(nod_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(nod_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(rn+n), MPIerr) + END DO + + com_nod2D%nreq = rn+sn + + end if + +END SUBROUTINE exchange_nod2D_begin +!=============================================== +! 
General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_2fields(nod1_array2D, nod2_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + + if (npes > 1) then + call exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D, partit) + call exchange_nod_end(partit) + end if + +END SUBROUTINE exchange_nod2D_2fields + +! ======================================================================== +! General version of the communication routine for 2D nodal fields +subroutine exchange_nod2D_2fields_begin(nod1_array2D, nod2_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes > 1) then + + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + ! 
Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod2D, s_mpitype_nod2D, & + com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(nod1_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(2*n-1), MPIerr) + + call MPI_IRECV(nod2_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n)+npes, MPI_COMM_FESOM, com_nod2D%req(2*n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(nod1_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(2*rn+2*n-1), MPIerr) + + call MPI_ISEND(nod2_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype+npes, MPI_COMM_FESOM, com_nod2D%req(2*rn+2*n), MPIerr) + END DO + + com_nod2D%nreq = 2*(rn+sn) + +end if + +END SUBROUTINE exchange_nod2D_2fields_begin + +!=============================================== +subroutine exchange_nod2D_3fields(nod1_array2D, nod2_array2D, nod3_array2D, partit) +! General version of the communication routine for 2D nodal fields +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +real(real64), intent(inout) :: nod3_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + + if (npes > 1) then + call exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D, partit) + call exchange_nod_end(partit) + end if + +END SUBROUTINE exchange_nod2D_3fields + +! ======================================================================== +subroutine exchange_nod2D_3fields_begin(nod1_array2D, nod2_array2D, nod3_array2D, partit) +! 
General version of the communication routine for 2D nodal fields +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array2D(:) +real(real64), intent(inout) :: nod2_array2D(:) +real(real64), intent(inout) :: nod3_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod2D, s_mpitype_nod2D, & + com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(nod1_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(3*n-2), MPIerr) + + call MPI_IRECV(nod2_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n)+npes, MPI_COMM_FESOM, com_nod2D%req(3*n-1), MPIerr) + + call MPI_IRECV(nod3_array2D, 1, r_mpitype_nod2D(n), com_nod2D%rPE(n), & + com_nod2D%rPE(n)+2*npes, MPI_COMM_FESOM, com_nod2D%req(3*n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(nod1_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(3*rn+3*n-2), MPIerr) + + call MPI_ISEND(nod2_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype+npes, MPI_COMM_FESOM, com_nod2D%req(3*rn+3*n-1), MPIerr) + + call MPI_ISEND(nod3_array2D, 1, s_mpitype_nod2D(n), com_nod2D%sPE(n), & + mype+2*npes, MPI_COMM_FESOM, com_nod2D%req(3*rn+3*n), MPIerr) + END DO + + com_nod2D%nreq = 3*(rn+sn) + +end if + +END SUBROUTINE exchange_nod2D_3fields_begin + +! ======================================================================== +! General version of the communication routine for 3D nodal fields +! 
stored in (vertical, horizontal) format +subroutine exchange_nod3D(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:) + +if (partit%npes > 1) then + call exchange_nod3D_begin(nod_array3D, partit) + call exchange_nod_end(partit) +endif + +END SUBROUTINE exchange_nod3D + +! ======================================================================== +! General version of the communication routine for 3D nodal fields +! stored in (vertical, horizontal) format +subroutine exchange_nod3D_begin(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:) +integer :: n, sn, rn +integer :: nz, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + nl1=ubound(nod_array3D,1) + + if ((nl1ubound(r_mpitype_nod3D, 2))) then + if (mype==0) then + print *,'Subroutine exchange_nod3D not implemented for',nl1,'layers.' + print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' + endif + call par_ex(partit, 1) + endif + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod3D(:,nl1,1), s_mpitype_nod3D(:,nl1,1), & + com_nod2D%rPE, com_nod2D%sPE) +#endif + DO n=1,rn + call MPI_IRECV(nod_array3D, 1, r_mpitype_nod3D(n,nl1,1), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(nod_array3D, 1, s_mpitype_nod3D(n,nl1,1), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(rn+n), MPIerr) + END DO + com_nod2D%nreq = rn+sn + + endif +END SUBROUTINE exchange_nod3D_begin + +! ======================================================================== +! General version of the communication routine for 3D nodal fields +! 
stored in (vertical, horizontal) format +subroutine exchange_nod3D_2fields(nod1_array3D,nod2_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array3D(:,:) +real(real64), intent(inout) :: nod2_array3D(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes > 1) then + call exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) + call exchange_nod_end(partit) +endif +END SUBROUTINE exchange_nod3D_2fields + +! ======================================================================== +subroutine exchange_nod3D_2fields_begin(nod1_array3D,nod2_array3D, partit) +! General version of the communication routine for 3D nodal fields +! stored in (vertical, horizontal) format +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod1_array3D(:,:) +real(real64), intent(inout) :: nod2_array3D(:,:) +integer :: n, sn, rn +integer :: nz, nl1, nl2 +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + nl1 = ubound(nod1_array3D,1) + + if ((nl1ubound(r_mpitype_nod3D, 2))) then + if (mype==0) then + print *,'Subroutine exchange_nod3D not implemented for',nl1,'layers.' + print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' + endif + call par_ex(1) + endif + + nl2 = ubound(nod2_array3D,1) + if ((nl2ubound(r_mpitype_nod3D, 2))) then + if (mype==0) then + print *,'Subroutine exchange_nod3D not implemented for',nl2,'layers.' + print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' 
+ endif + call par_ex(1) + endif + +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod3D(:,nl1,1), s_mpitype_nod3D(:,nl1,1), & + com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(nod1_array3D, 1, r_mpitype_nod3D(n,nl1,1), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(2*n-1), MPIerr) + + call MPI_IRECV(nod2_array3D, 1, r_mpitype_nod3D(n,nl2,1), com_nod2D%rPE(n), & + com_nod2D%rPE(n)+npes, MPI_COMM_FESOM, com_nod2D%req(2*n ), MPIerr) + END DO + + DO n=1, sn + call MPI_ISEND(nod1_array3D, 1, s_mpitype_nod3D(n,nl1,1), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(2*rn+2*n-1), MPIerr) + + call MPI_ISEND(nod2_array3D, 1, s_mpitype_nod3D(n,nl2,1), com_nod2D%sPE(n), & + mype+npes, MPI_COMM_FESOM, com_nod2D%req(2*rn+2*n), MPIerr) + END DO + + com_nod2D%nreq = 2*(rn+sn) + + endif +END SUBROUTINE exchange_nod3D_2fields_begin +! ======================================================================== +subroutine exchange_nod3D_n(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:,:) +if (partit%npes>1) then + call exchange_nod3D_n_begin(nod_array3D, partit) + call exchange_nod_end(partit) +endif + +END SUBROUTINE exchange_nod3D_n + +!================================================= +! General version of the communication routine for 3D nodal fields +! stored in (vertical, horizontal) format +subroutine exchange_nod3D_n_begin(nod_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: nod_array3D(:,:,:) +integer :: n, sn, rn +integer :: nz, nl1, n_val +#include "associate_part_def.h" +#include "associate_part_ass.h" +if (npes>1) then + ! nod_array3D(n_val,nl1,nod2D_size) + nl1 = ubound(nod_array3D,2) + n_val = ubound(nod_array3D,1) + if ((nl1ubound(r_mpitype_nod3D, 2)) .or. (n_val > 3)) then + ! 
This routine also works for swapped dimensions nod_array3D(nl1,n_val, nod2D_size) + nl1 = ubound(nod_array3D,1) + n_val = ubound(nod_array3D,2) + + if ((nl1ubound(r_mpitype_nod3D, 2)) .or. (n_val > 3)) then + if (mype==0) then + print *,'Subroutine exchange_nod3D_n not implemented for' + print *,nl1,'layers and / or ',n_val,'values per element.' + print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' + endif + call par_ex(1) + endif + endif + sn=com_nod2D%sPEnum + rn=com_nod2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_nod3D(:,nl1,n_val), & + s_mpitype_nod3D(:,nl1,n_val), com_nod2D%rPE, com_nod2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(nod_array3D, 1, r_mpitype_nod3D(n,nl1,n_val), com_nod2D%rPE(n), & + com_nod2D%rPE(n), MPI_COMM_FESOM, com_nod2D%req(n), MPIerr) + END DO + + DO n=1, sn + call MPI_ISEND(nod_array3D, 1, s_mpitype_nod3D(n,nl1,n_val), com_nod2D%sPE(n), & + mype, MPI_COMM_FESOM, com_nod2D%req(rn+n), MPIerr) + END DO + + com_nod2D%nreq = rn+sn + + endif + + +END SUBROUTINE exchange_nod3D_n_begin + +!======================================= +! 
AND WAITING +!======================================= + +SUBROUTINE exchange_nod_end(partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit + +if (partit%npes > 1) & + call MPI_WAITALL(partit%com_nod2D%nreq, partit%com_nod2D%req, MPI_STATUSES_IGNORE, partit%MPIerr) + +END SUBROUTINE exchange_nod_end + +SUBROUTINE exchange_elem_end(partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes > 1) then + if (elem_full_flag) then + call MPI_WAITALL(com_elem2D_full%nreq, & + com_elem2D_full%req, MPI_STATUSES_IGNORE, MPIerr) + else + call MPI_WAITALL(com_elem2D%nreq, & + com_elem2D%req, MPI_STATUSES_IGNORE, MPIerr) + endif + end if +END SUBROUTINE exchange_elem_end +!============================================================================= +subroutine exchange_elem3D(elem_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +call exchange_elem3D_begin(elem_array3D, partit) +call exchange_elem_end(partit) + +END SUBROUTINE exchange_elem3D +!=========================================== +! General version of the communication routine for 3D elemental fields +! stored in (vertical, horizontal) format +subroutine exchange_elem3D_begin(elem_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:) +integer :: n, sn, rn, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + + nl1=ubound(elem_array3D,1) + + if (ubound(elem_array3D,2)<=myDim_elem2D+eDim_elem2D) then + + elem_full_flag = .false. + + sn=com_elem2D%sPEnum + rn=com_elem2D%rPEnum + + if (nl1==ubound(r_mpitype_elem3D, 2) .or. 
nl1==ubound(r_mpitype_elem3D, 2)-1) then + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem3D(:,nl1,1), s_mpitype_elem3D(:,nl1,1), & + com_elem2D%rPE, com_elem2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem3D(n,nl1,1), com_elem2D%rPE(n), & + com_elem2D%rPE(n), MPI_COMM_FESOM, & + com_elem2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem3D(n,nl1,1), com_elem2D%sPE(n), & + mype, MPI_COMM_FESOM, & + com_elem2D%req(rn+n), MPIerr) + END DO + + elseif (nl1 <= 4) then + ! In fact, this is a 2D-array with up to 4 values, e.g. derivatives + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem2D(:,nl1), s_mpitype_elem2D(:,nl1), & + com_elem2D%rPE, com_elem2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem2D(n,nl1), com_elem2D%rPE(n), & + com_elem2D%rPE(n), MPI_COMM_FESOM, & + com_elem2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem2D(n,nl1), com_elem2D%sPE(n), & + mype, MPI_COMM_FESOM, & + com_elem2D%req(rn+n), MPIerr) + END DO + else + if (mype==0) print *,'Sorry, no MPI datatype prepared for',nl1,'values per element (exchange_elem3D)' + call par_ex(1) + endif + + com_elem2D%nreq = rn+sn + + else + + elem_full_flag = .true. + + sn=com_elem2D_full%sPEnum + rn=com_elem2D_full%rPEnum + + if (nl1==ubound(r_mpitype_elem3D_full, 2) .or. nl1==ubound(r_mpitype_elem3D_full, 2)-1) then + ! 
Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem3D_full(:,nl1,1), & + s_mpitype_elem3D_full(:,nl1,1), com_elem2D_full%rPE, com_elem2D_full%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem3D_full(n,nl1,1), & + com_elem2D_full%rPE(n), & + com_elem2D_full%rPE(n), MPI_COMM_FESOM, & + com_elem2D_full%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem3D_full(n,nl1,1), & + com_elem2D_full%sPE(n), & + mype, MPI_COMM_FESOM, & + com_elem2D_full%req(rn+n), MPIerr) + END DO + elseif (nl1 <= 4) then + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem2D_full(:,nl1), & + s_mpitype_elem2D_full(:,nl1), com_elem2D_full%rPE, com_elem2D_full%sPE) +#endif + + ! In fact, this is a 2D-array with up to 4 values, e.g. derivatives + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem2D_full(n,nl1), & + com_elem2D_full%rPE(n), & + com_elem2D_full%rPE(n), MPI_COMM_FESOM, & + com_elem2D_full%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem2D_full(n,nl1), & + com_elem2D_full%sPE(n), & + mype, MPI_COMM_FESOM, & + com_elem2D_full%req(rn+n), MPIerr) + END DO + else + if (mype==0) print *,'Sorry, no MPI datatype prepared for',nl1,'values per element (exchange_elem3D)' + call par_ex(1) + endif + + com_elem2D_full%nreq = rn+sn + + endif + +endif + +END SUBROUTINE exchange_elem3D_begin + +!============================================================================= +! General version of the communication routine for 3D elemental fields +! 
stored in (vertical, horizontal) format +subroutine exchange_elem3D_n(elem_array3D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes> 1) then + call exchange_elem3D_n_begin(elem_array3D, partit) + call exchange_elem_end(partit) + endif +END SUBROUTINE exchange_elem3D_n +!============================================================================= +subroutine exchange_elem3D_n_begin(elem_array3D, partit) +! General version of the communication routine for 3D elemental fields +! stored in (vertical, horizontal) format +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array3D(:,:,:) +integer :: n, sn, rn, n_val, nl1 +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes> 1) then + nl1 = ubound(elem_array3D,2) + n_val = ubound(elem_array3D,1) + + if ((nl1ubound(r_mpitype_elem3D, 2)) .or. (n_val > 4)) then + + ! This routine also works for swapped dimensions elem_array3D(nl1,n_val, elem2D_size) + nl1= ubound(elem_array3D,1) + n_val = ubound(elem_array3D,2) + + if ((nl1ubound(r_mpitype_elem3D, 2)) .or. (n_val > 4)) then + if (mype==0) then + print *,'Subroutine exchange_elem3D_n not implemented for' + print *,nl1,'layers and / or ',n_val,'values per element.' + print *,'Adding the MPI datatypes is easy, see oce_modules.F90.' + endif + call par_ex(1) + endif + endif + + if (ubound(elem_array3D,3)<=myDim_elem2D+eDim_elem2D) then + + elem_full_flag = .false. + + sn=com_elem2D%sPEnum + rn=com_elem2D%rPEnum + + ! 
Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem3D(:,nl1,n_val), & + s_mpitype_elem3D(:,nl1,n_val), com_elem2D%rPE, com_elem2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem3D(n,nl1,n_val), com_elem2D%rPE(n), & + com_elem2D%rPE(n), MPI_COMM_FESOM, com_elem2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem3D(n,nl1,n_val), com_elem2D%sPE(n), & + mype, MPI_COMM_FESOM, com_elem2D%req(rn+n), MPIerr) + END DO + + com_elem2D%nreq = rn+sn + + else + + elem_full_flag = .true. + + sn=com_elem2D_full%sPEnum + rn=com_elem2D_full%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem3D_full(:,nl1,n_val), & + s_mpitype_elem3D_full(:,nl1,n_val), com_elem2D_full%rPE, com_elem2D_full%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array3D, 1, r_mpitype_elem3D_full(n,nl1,n_val), com_elem2D_full%rPE(n), & + com_elem2D_full%rPE(n), MPI_COMM_FESOM, com_elem2D_full%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array3D, 1, s_mpitype_elem3D_full(n,nl1,n_val), com_elem2D_full%sPE(n), & + mype, MPI_COMM_FESOM, com_elem2D_full%req(rn+n), MPIerr) + END DO + + com_elem2D_full%nreq = rn+sn + + end if + + +endif +END SUBROUTINE exchange_elem3D_n_begin +!======================================================================== +! General version of the communication routine for 3D elemental fields +! 
stored in (vertical, horizontal) format +subroutine exchange_elem2D(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array2D(:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes> 1) then + call exchange_elem2D_begin(elem_array2D, partit) + call exchange_elem_end(partit) + end if + +END SUBROUTINE exchange_elem2D +!======================================================================== +! General version of the communication routine for 3D elemental fields +! stored in (vertical, horizontal) format +subroutine exchange_elem2D_begin(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +real(real64), intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + + if (ubound(elem_array2D,1)<=myDim_elem2D+eDim_elem2D) then + + elem_full_flag = .false. + + sn=com_elem2D%sPEnum + rn=com_elem2D%rPEnum + + ! Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem2D(:,1), s_mpitype_elem2D(:,1), & + com_elem2D%rPE, com_elem2D%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array2D, 1, r_mpitype_elem2D(n,1), com_elem2D%rPE(n), & + com_elem2D%rPE(n), MPI_COMM_FESOM, com_elem2D%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array2D, 1, s_mpitype_elem2D(n,1), com_elem2D%sPE(n), & + mype, MPI_COMM_FESOM, com_elem2D%req(rn+n), MPIerr) + END DO + + com_elem2D%nreq = rn+sn + + else + elem_full_flag = .true. + + sn=com_elem2D_full%sPEnum + rn=com_elem2D_full%rPEnum + + ! 
Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem2D_full(:,1), s_mpitype_elem2D_full(:,1), & + com_elem2D_full%rPE, com_elem2D_full%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array2D, 1, r_mpitype_elem2D_full(n,1), com_elem2D_full%rPE(n), & + com_elem2D_full%rPE(n), MPI_COMM_FESOM, com_elem2D_full%req(n), MPIerr) + END DO + DO n=1, sn + call MPI_ISEND(elem_array2D, 1, s_mpitype_elem2D_full(n,1), com_elem2D_full%sPE(n), & + mype, MPI_COMM_FESOM, com_elem2D_full%req(rn+n), MPIerr) + END DO + + com_elem2D_full%nreq = rn+sn + + end if + +end if + +END SUBROUTINE exchange_elem2D_begin +! ======================================================================== +!Exchange with ALL(!) the neighbours +subroutine exchange_elem2D_i(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes> 1) then + call exchange_elem2D_i_begin(elem_array2D, partit) + call exchange_elem_end(partit) +end if + +END SUBROUTINE exchange_elem2D_i +!============================================================================= +!Exchange with ALL(!) the neighbours +subroutine exchange_elem2D_i_begin(elem_array2D, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer, intent(inout) :: elem_array2D(:) +integer :: n, sn, rn +#include "associate_part_def.h" +#include "associate_part_ass.h" + + if (npes> 1) then + + elem_full_flag = .true. + + sn=com_elem2D_full%sPEnum + rn=com_elem2D_full%rPEnum + + ! 
Check MPI point-to-point communication for consistency +#ifdef DEBUG + call check_mpi_comm(rn, sn, r_mpitype_elem2D_full_i, s_mpitype_elem2D_full_i, & + com_elem2D_full%rPE, com_elem2D_full%sPE) +#endif + + DO n=1,rn + call MPI_IRECV(elem_array2D, 1, r_mpitype_elem2D_full_i(n), com_elem2D_full%rPE(n), & + com_elem2D_full%rPE(n), MPI_COMM_FESOM, com_elem2D_full%req(n), MPIerr) + END DO + + DO n=1, sn + + call MPI_ISEND(elem_array2D, 1, s_mpitype_elem2D_full_i(n), com_elem2D_full%sPE(n), & + mype, MPI_COMM_FESOM, com_elem2D_full%req(rn+n), MPIerr) + END DO + + com_elem2D_full%nreq = rn+sn + +end if + +END SUBROUTINE exchange_elem2D_i_begin +! ======================================================================== +! Broadcast routines +! Many because of different sizes. +! ======================================================================== +subroutine broadcast_nod3D(arr3D, arr3Dglobal, partit) +! Distribute the nodal information available on 0 PE to other PEs +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nz, counter,nl1 +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr3D(:,:) +real(real64) :: arr3Dglobal(:,:) +integer :: node_size +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf, recvbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" + +node_size=myDim_nod2D+eDim_nod2D +nl1=ubound(arr3D,1) +IF ( mype == 0 ) THEN + if (npes>1) then + arr3D(:,1:node_size)=arr3Dglobal(:,myList_nod2D(1:node_size)) + end if + DO n = 1, npes-1 + CALL MPI_RECV( nTS, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(sendbuf(nTS*nl1), irecvbuf(nTS)) + + CALL MPI_RECV(irecvbuf(1), nTS, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + counter=0 + DO i = 1, nTS + DO nz=1, nl1 + counter=counter+1 + sendbuf(counter) = arr3Dglobal(nz,irecvbuf(i)) + ENDDO + ENDDO 
+ + CALL MPI_SEND(sendbuf(1), nTS*nl1, MPI_DOUBLE_PRECISION, & + sender, 2, MPI_COMM_FESOM, MPIerr ) + + DEALLOCATE(irecvbuf, sendbuf) + ENDDO +ELSE + CALL MPI_SEND( node_size, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_nod2D(1), node_size, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + + ALLOCATE(recvbuf(node_size*nl1)) + CALL MPI_RECV( recvbuf(1), node_size*nl1, MPI_DOUBLE_PRECISION, 0, & + 2, MPI_COMM_FESOM, status, MPIerr ) + counter=0 + DO n = 1, node_size + DO nz=1, nl1 + counter=counter+1 + arr3D(nz,n)=recvbuf(counter) + ENDDO + ENDDO + + DEALLOCATE(recvbuf) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine broadcast_nod3D +! +!============================================================================ +! +subroutine broadcast_nod2D(arr2D, arr2Dglobal, partit) +! A 2D version of the previous routine +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(in), target :: partit +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf +integer :: node_size +#include "associate_part_def.h" +#include "associate_part_ass.h" + +node_size=myDim_nod2D+eDim_nod2D + +IF ( mype == 0 ) THEN + if (npes>1) then + arr2D(1:node_size)=arr2Dglobal(myList_nod2D(1:node_size)) + end if + DO n = 1, npes-1 + CALL MPI_RECV( nTS, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(sendbuf(nTS), irecvbuf(nTS)) + + CALL MPI_RECV(irecvbuf(1), nTS, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + DO i = 1, nTS + sendbuf(i) = arr2Dglobal(irecvbuf(i)) + ENDDO + + CALL MPI_SEND(sendbuf(1), nTS, MPI_DOUBLE_PRECISION, & + sender, 2, MPI_COMM_FESOM, MPIerr ) + + DEALLOCATE(irecvbuf, sendbuf) + ENDDO +ELSE + CALL MPI_SEND( node_size, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( 
myList_nod2D(1), node_size, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + CALL MPI_RECV( arr2D(1), node_size, MPI_DOUBLE_PRECISION, 0, & + 2, MPI_COMM_FESOM, status, MPIerr ) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine broadcast_nod2D +! +!============================================================================ +! +subroutine broadcast_elem3D(arr3D, arr3Dglobal, partit) +! Distribute the elemental information available on 0 PE to other PEs +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(in), target :: partit +INTEGER :: nz, counter,nl1 +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr3D(:,:) +real(real64) :: arr3Dglobal(:,:) +integer :: elem_size + +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf, recvbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" + +elem_size=myDim_elem2D+eDim_elem2D + +nl1=ubound(arr3D,1) +IF ( mype == 0 ) THEN + if (npes>1) then + arr3D(:,1:elem_size)=arr3Dglobal(:,myList_elem2D(1:elem_size)) + end if + DO n = 1, npes-1 + CALL MPI_RECV( nTS, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(sendbuf(nTS*nl1), irecvbuf(nTS)) + + CALL MPI_RECV(irecvbuf(1), nTS, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + counter=0 + DO i = 1, nTS + DO nz=1, nl1 + counter=counter+1 + sendbuf(counter) = arr3Dglobal(nz,irecvbuf(i)) + ENDDO + ENDDO + + CALL MPI_SEND(sendbuf(1), nTS*nl1, MPI_DOUBLE_PRECISION, & + sender, 2, MPI_COMM_FESOM, MPIerr ) + + DEALLOCATE(irecvbuf, sendbuf) + ENDDO +ELSE + CALL MPI_SEND( elem_size, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_elem2D(1), elem_size, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + + ALLOCATE(recvbuf(elem_size*nl1)) + CALL MPI_RECV( recvbuf(1), elem_size*nl1, MPI_DOUBLE_PRECISION, 0, & + 2, MPI_COMM_FESOM, status, MPIerr ) + counter=0 + DO n = 1, elem_size + DO 
nz=1, nl1 + counter=counter+1 + arr3D(nz,n)=recvbuf(counter) + ENDDO + ENDDO + + DEALLOCATE(recvbuf) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine broadcast_elem3D +! +!============================================================================ +! +subroutine broadcast_elem2D(arr2D, arr2Dglobal, partit) +! A 2D version of the previous routine +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(in), target :: partit +integer :: i, n, nTS, sender, status(MPI_STATUS_SIZE) +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: elem_size +INTEGER, ALLOCATABLE, DIMENSION(:) :: irecvbuf +real(real64), ALLOCATABLE, DIMENSION(:) :: sendbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" + +elem_size=myDim_elem2D+eDim_elem2D + +IF ( mype == 0 ) THEN + if (npes>1) then + arr2D(1:elem_size)=arr2Dglobal(myList_elem2D(1:elem_size)) + end if + DO n = 1, npes-1 + CALL MPI_RECV( nTS, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(sendbuf(1:nTS), irecvbuf(nTS)) + + CALL MPI_RECV(irecvbuf(1), nTS, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + DO i = 1, nTS + sendbuf(i) = arr2Dglobal(irecvbuf(i)) + ENDDO + + CALL MPI_SEND(sendbuf(1), nTS, MPI_DOUBLE_PRECISION, & + sender, 2, MPI_COMM_FESOM, MPIerr ) + + DEALLOCATE(irecvbuf, sendbuf) + ENDDO +ELSE + CALL MPI_SEND( elem_size, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_elem2D(1), elem_size, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + CALL MPI_RECV( arr2D(1), elem_size, MPI_DOUBLE_PRECISION, 0, & + 2, MPI_COMM_FESOM, status, MPIerr ) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine broadcast_elem2D +! +!============================================================================ +! Make nodal information available to master PE +! 
Use only with 3D arrays stored in (vertical, horizontal) way +subroutine gather_nod3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real64) :: arr3D_global(:,:) +real(real64), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(nl1,ubound(arr3D_global,2))) + + do n = 1, npes-1 + n3D = (remPtr_nod2D(n+1) - remPtr_nod2D(n))*nl1 + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(1,start), n3D, MPI_DOUBLE_PRECISION, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_nod2D(1:myDim_nod2D)) = arr3D(1:nl1,1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_nod2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_nod2D*nl1, MPI_DOUBLE_PRECISION, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +end if +end subroutine gather_nod3D +! +!============================================================================ +! +subroutine gather_real4_nod3D(arr3D, arr3D_global, partit) + +! Make nodal information available to master PE +! +! 
Use only with 3D arrays stored in (vertical, horizontal) way +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real32) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +real(real32), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(nl1,ubound(arr3D_global,2))) + + do n = 1, npes-1 + n3D = (remPtr_nod2D(n+1) - remPtr_nod2D(n))*nl1 + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(1,start), n3D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_nod2D(1:myDim_nod2D)) = arr3D(1:nl1,1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_nod2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_nod2D*nl1, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +end if +end subroutine gather_real4_nod3D +!======================================================= + +subroutine gather_int2_nod3D(arr3D, arr3D_global, partit) + +! Make nodal information available to master PE +! +! 
Use only with 3D arrays stored in (vertical, horizontal) way +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +integer(int16) :: arr3D(:,:) +integer(int16) :: arr3D_global(:,:) +integer(int16), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D +#include "associate_part_def.h" +#include "associate_part_ass.h" + + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global! + +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(nl1,ubound(arr3D_global,2))) + + do n = 1, npes-1 + n3D = (remPtr_nod2D(n+1) - remPtr_nod2D(n))*nl1 + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(1,start), n3D, MPI_SHORT, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_nod2D(1:myDim_nod2D)) = arr3D(1:nl1,1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_nod2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_nod2D*nl1, MPI_SHORT, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +end if +end subroutine gather_int2_nod3D +!============================================== +subroutine gather_nod2D(arr2D, arr2D_global, partit) +! Make nodal information available to master PE +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real64) :: arr2D_global(:) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! 
+ +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(ubound(arr2D_global,1))) + do n = 1, npes-1 + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(start), n2D, MPI_DOUBLE_PRECISION, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_nod2D(1:myDim_nod2D)) = arr2D(1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1 : remPtr_nod2D(npes)-1) + deallocate(recvbuf) + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_nod2D, MPI_DOUBLE_PRECISION, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_nod2D +!============================================== +subroutine gather_real4_nod2D(arr2D, arr2D_global, partit) +! Make nodal information available to master PE +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real32) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! 
+ +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(ubound(arr2D_global,1))) + do n = 1, npes-1 + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(start), n2D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_nod2D(1:myDim_nod2D)) = arr2D(1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1 : remPtr_nod2D(npes)-1) + deallocate(recvbuf) + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_nod2D, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_real4_nod2D + +!============================================== +! Make nodal information available to master PE +subroutine gather_int2_nod2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer(int16) :: arr2D(:) +integer(int16) :: arr2D_global(:) +integer(int16), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! 
+ +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(ubound(arr2D_global,1))) + do n = 1, npes-1 + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(start), n2D, MPI_SHORT, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_nod2D(1:myDim_nod2D)) = arr2D(1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1 : remPtr_nod2D(npes)-1) + deallocate(recvbuf) + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_nod2D, MPI_SHORT, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_int2_nod2D + +!============================================================================ +subroutine gather_elem3D(arr3D, arr3D_global, partit) +! Make element information available to master PE +! +! Use only with 3D arrays stored in (vertical, horizontal) way +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real64) :: arr3D_global(:,:) +real(real64), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global +! (Carefull with duplicate interface elements, coming from two +! PEs at once!) + +IF ( mype == 0 ) THEN + + if (npes>1) then +! 
+ allocate(recvbuf(nl1,remPtr_elem2D(npes))) + + do n = 1, npes-1 + e3D = (remPtr_elem2D(n+1) - remPtr_elem2D(n))*nl1 + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(1,start), e3D, MPI_DOUBLE_PRECISION, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_elem2D(1:myDim_elem2D)) = arr3D(1:nl1,1:myDim_elem2D) + + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_elem2D*nl1, MPI_DOUBLE_PRECISION, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_elem3D + +!=================================================================== +! Make element information available to master PE +! Use only with 3D arrays stored in (vertical, horizontal) way +subroutine gather_real4_elem3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real32) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +real(real32), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global +! (Carefull with duplicate interface elements, coming from two +! PEs at once!) + +IF ( mype == 0 ) THEN + + if (npes>1) then +! 
+ allocate(recvbuf(nl1,remPtr_elem2D(npes))) + + do n = 1, npes-1 + e3D = (remPtr_elem2D(n+1) - remPtr_elem2D(n))*nl1 + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(1,start), e3D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_elem2D(1:myDim_elem2D)) = arr3D(1:nl1,1:myDim_elem2D) + + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_elem2D*nl1, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_real4_elem3D + + +!=================================================================== +! Make element information available to master PE +! Use only with 3D arrays stored in (vertical, horizontal) way +subroutine gather_int2_elem3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +integer(int16) :: arr3D(:,:) +integer(int16) :: arr3D_global(:,:) +integer(int16), allocatable :: recvbuf(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D, ende, err_alloc +integer :: max_loc_Dim, i, status(MPI_STATUS_SIZE) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global +! (Carefull with duplicate interface elements, coming from two +! PEs at once!) + +IF ( mype == 0 ) THEN + + if (npes>1) then +! 
+ allocate(recvbuf(nl1,remPtr_elem2D(npes))) + + do n = 1, npes-1 + e3D = (remPtr_elem2D(n+1) - remPtr_elem2D(n))*nl1 + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(1,start), e3D, MPI_SHORT, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_elem2D(1:myDim_elem2D)) = arr3D(1:nl1,1:myDim_elem2D) + + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + call MPI_SEND( arr3D, myDim_elem2D*nl1, MPI_SHORT, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_int2_elem3D + + +!============================================== +! Make element information available to master PE +subroutine gather_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real64) :: arr2D_global(:) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! 
+ +IF ( mype == 0 ) THEN + + if (npes>1) then + + allocate(recvbuf(remPtr_elem2D(npes))) + + do n = 1, npes-1 + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(start), e2D, MPI_DOUBLE_PRECISION, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_elem2D(1:myDim_elem2D)) = arr2D(1:myDim_elem2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_elem2D, MPI_DOUBLE_PRECISION, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF +end if + +end subroutine gather_elem2D + +!================================================ +! Make element information available to master PE +subroutine gather_real4_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real32) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! 
+ +IF ( mype == 0 ) THEN + + if (npes>1) then + + allocate(recvbuf(remPtr_elem2D(npes))) + + do n = 1, npes-1 + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(start), e2D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_elem2D(1:myDim_elem2D)) = arr2D(1:myDim_elem2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_elem2D, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF +end if + +end subroutine gather_real4_elem2D + +!================================================ +! Make element information available to master PE +subroutine gather_int2_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer(int16) :: arr2D(:) +integer(int16) :: arr2D_global(:) +integer(int16), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! 
+ +IF ( mype == 0 ) THEN + + if (npes>1) then + + allocate(recvbuf(remPtr_elem2D(npes))) + + do n = 1, npes-1 + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(start), e2D, MPI_SHORT, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_elem2D(1:myDim_elem2D)) = arr2D(1:myDim_elem2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_elem2D, MPI_SHORT, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF +end if + +end subroutine gather_int2_elem2D + + +!============================================================================ +! Make nodal information available to master PE +! Use only with 3D arrays stored in (vertical, horizontal) way +subroutine gather_real8to4_nod3D(arr3D, arr3D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +integer :: req(partit%npes-1) +integer :: start, n3D, ierr +real(real32), allocatable :: recvbuf(:,:) +real(real32), allocatable :: sendbuf(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global! 
+ +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(nl1, ubound(arr3D_global,2))) + + do n = 1, npes-1 + n3D = (remPtr_nod2D(n+1) - remPtr_nod2D(n))*nl1 + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(1,start), n3D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_nod2D(1:myDim_nod2D)) = arr3D(1:nl1,1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_nod2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + + allocate(sendbuf(nl1,myDim_nod2D)) + sendbuf(1:nl1,1:myDim_nod2D) = arr3D(1:nl1,1:myDim_nod2D) + + call MPI_SEND(sendbuf, myDim_nod2D*nl1, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + deallocate(sendbuf) + +ENDIF + +end if + +end subroutine gather_real8to4_nod3D +!============================================== +! Make nodal information available to master PE +subroutine gather_real8to4_nod2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32) :: sendbuf(partit%myDim_nod2D) +real(real64), allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +! Consider MPI-datatypes to recv directly into arr2D_global! 
+ + if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(ubound(arr2D_global,1))) + do n = 1, npes-1 + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(start), n2D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_nod2D(1:myDim_nod2D)) = arr2D(1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1 : remPtr_nod2D(npes)-1) + deallocate(recvbuf) + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + sendbuf(1:myDim_nod2D) = real(arr2D(1:myDim_nod2D),real32) + + call MPI_SEND(sendbuf, myDim_nod2D, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +end if +end subroutine gather_real8to4_nod2D +!============================================================================ +subroutine gather_real8to4_elem3D(arr3D, arr3D_global, partit) +! Make element information available to master PE +! Use only with 3D arrays stored in (vertical, horizontal) way +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +INTEGER :: nl1 +integer :: n +real(real64) :: arr3D(:,:) +real(real32) :: arr3D_global(:,:) +integer :: req(partit%npes-1) +integer :: start, e3D +real(real32), allocatable :: recvbuf(:,:) +real(real32), allocatable :: sendbuf(:,:) +#include "associate_part_def.h" +#include "associate_part_ass.h" + + +if (npes> 1) then +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +nl1=ubound(arr3D,1) + +! Consider MPI-datatypes to recv directly into arr3D_global! 
+ +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(nl1,remPtr_elem2D(npes))) + + do n = 1, npes-1 + e3D = (remPtr_elem2D(n+1) - remPtr_elem2D(n))*nl1 + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(1,start), e3D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr3D_global(1:nl1,myList_elem2D(1:myDim_elem2D)) = arr3D(1:nl1,1:myDim_elem2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr3D_global(1:nl1, remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1:nl1, 1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + arr3D_global(:,:) = arr3D(:,:) + endif + +ELSE + allocate(sendbuf(nl1,myDim_elem2D)) + sendbuf(1:nl1,1:myDim_elem2D) = arr3D(1:nl1,1:myDim_elem2D) + + call MPI_SEND(sendbuf, myDim_elem2D*nl1, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + deallocate(sendbuf) +ENDIF + +end if +end subroutine gather_real8to4_elem3D +!================================================ +! Make element information available to master PE +subroutine gather_real8to4_elem2D(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +real(real64) :: arr2D(:) +real(real32) :: arr2D_global(:) +real(real32), allocatable :: recvbuf(:) +real(real32) :: sendbuf(partit%myDim_elem2D) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +! Consider MPI-datatypes to recv directly into arr2D_global! 
+ +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(remPtr_elem2D(npes))) + + do n = 1, npes-1 + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(start), e2D, MPI_REAL, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_elem2D(1:myDim_elem2D)) = arr2D(1:myDim_elem2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1 : remPtr_elem2D(npes)-1) + + deallocate(recvbuf) + + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + sendbuf(1:myDim_elem2D) = real(arr2D(1:myDim_elem2D),real32) + call MPI_SEND(sendbuf, myDim_elem2D, MPI_REAL, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +end if +end subroutine gather_real8to4_elem2D +!============================================== +subroutine gather_elem2D_i(arr2D, arr2D_global, partit) +! Make element information available to master PE +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer :: arr2D(:) +integer :: arr2D_global(:) +integer, allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, e2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + ! Consider MPI-datatypes to recv directly into arr2D_global! 
+ IF ( mype == 0 ) THEN + if (npes > 1) then + allocate(recvbuf(remPtr_elem2D(npes))) + do n = 1, npes-1 + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + start = remPtr_elem2D(n) + call MPI_IRECV(recvbuf(start), e2D, MPI_INTEGER, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + arr2D_global(myList_elem2D(1:myDim_elem2D)) = arr2D(1:myDim_elem2D) + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + arr2D_global(remList_elem2D(1 : remPtr_elem2D(npes)-1)) & + = recvbuf(1 : remPtr_elem2D(npes)-1) + deallocate(recvbuf) + else + arr2D_global(:) = arr2D(:) + endif + ELSE + call MPI_SEND(arr2D, myDim_elem2D, MPI_INTEGER, 0, 2, MPI_COMM_FESOM, MPIerr ) + ENDIF +end subroutine gather_elem2D_i +!============================================== +! Make nodal information available to master PE +subroutine gather_nod2D_i(arr2D, arr2D_global, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: n +integer :: arr2D(:) +integer :: arr2D_global(:) +integer, allocatable :: recvbuf(:) +integer :: req(partit%npes-1) +integer :: start, n2D +#include "associate_part_def.h" +#include "associate_part_ass.h" + +if (npes> 1) then + +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) + +! Consider MPI-datatypes to recv directly into arr2D_global! 
+ +IF ( mype == 0 ) THEN + + if (npes>1) then + allocate(recvbuf(ubound(arr2D_global, 1))) + do n = 1, npes-1 + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + start = remPtr_nod2D(n) + call MPI_IRECV(recvbuf(start), n2D, MPI_INTEGER, n, 2, MPI_COMM_FESOM, req(n), MPIerr) + enddo + + arr2D_global(myList_nod2D(1:myDim_nod2D)) = arr2D(1:myDim_nod2D) + + call MPI_WAITALL(npes-1, req, MPI_STATUSES_IGNORE, MPIerr) + + arr2D_global(remList_nod2D(1 : remPtr_nod2D(npes)-1)) & + = recvbuf(1 : remPtr_nod2D(npes)-1) + deallocate(recvbuf) + else + + arr2D_global(:) = arr2D(:) + + endif + +ELSE + + call MPI_SEND( arr2D, myDim_nod2D, MPI_INTEGER, 0, 2, MPI_COMM_FESOM, MPIerr ) + +ENDIF + +endif +end subroutine gather_nod2D_i +!============================================================================ +! A 2D version of the previous routine +subroutine gather_edg2D(arr2D, arr2Dglobal, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(in), target :: partit +real(real64) :: arr2D(:) +real(real64) :: arr2Dglobal(:) +integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf +REAL(real64), ALLOCATABLE, DIMENSION(:) :: rbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" + +IF ( mype == 0 ) THEN + arr2Dglobal(myList_edge2D(1:myDim_edge2D))=arr2D(1:myDim_edge2D) + DO n = 1, npes-1 + CALL MPI_RECV( buf_size, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(rbuf(buf_size), ibuf(buf_size)) + + CALL MPI_RECV(ibuf(1), buf_size, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + + CALL MPI_RECV(rbuf(1), buf_size, MPI_DOUBLE_PRECISION, sender, & + 2, MPI_COMM_FESOM, status, MPIerr ) + arr2Dglobal(ibuf)=rbuf + DEALLOCATE(ibuf, rbuf) + ENDDO +ELSE + CALL MPI_SEND( myDim_edge2D, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_edge2D(1), myDim_edge2D, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + CALL 
MPI_SEND( arr2D(1), myDim_edge2D, MPI_DOUBLE_PRECISION, 0, 2,& + MPI_COMM_FESOM, MPIerr ) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine gather_edg2D +! +!============================================================================ +! A 2D version of the previous routine +subroutine gather_edg2D_i(arr2D, arr2Dglobal, partit) +use MOD_MESH +use MOD_PARTIT +IMPLICIT NONE +type(t_partit), intent(inout), target :: partit +integer :: arr2D(:) +integer :: arr2Dglobal(:) +integer :: i, n, buf_size, sender, status(MPI_STATUS_SIZE) +INTEGER, ALLOCATABLE, DIMENSION(:) :: ibuf, vbuf +#include "associate_part_def.h" +#include "associate_part_ass.h" + +IF ( mype == 0 ) THEN + arr2Dglobal(myList_edge2D(1:myDim_edge2D))=arr2D(1:myDim_edge2D) + DO n = 1, npes-1 + CALL MPI_RECV( buf_size, 1, MPI_INTEGER, MPI_ANY_SOURCE, & + 0, MPI_COMM_FESOM, status, MPIerr ) + sender = status(MPI_SOURCE) + ALLOCATE(ibuf(buf_size), vbuf(buf_size)) + + CALL MPI_RECV(ibuf(1), buf_size, MPI_INTEGER, sender, & + 1, MPI_COMM_FESOM, status, MPIerr ) + + CALL MPI_RECV(vbuf(1), buf_size, MPI_INTEGER, sender, & + 2, MPI_COMM_FESOM, status, MPIerr ) + arr2Dglobal(ibuf)=vbuf + DEALLOCATE(ibuf, vbuf) + ENDDO +ELSE + CALL MPI_SEND( myDim_edge2D, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( myList_edge2D(1), myDim_edge2D, MPI_INTEGER, 0, 1, & + MPI_COMM_FESOM, MPIerr ) + CALL MPI_SEND( arr2D(1), myDim_edge2D, MPI_INTEGER, 0, 2,& + MPI_COMM_FESOM, MPIerr ) +ENDIF +CALL MPI_BARRIER(MPI_COMM_FESOM,MPIerr) +end subroutine gather_edg2D_i +!============================================== + +end module g_comm + + + +module g_comm_auto +use g_comm +implicit none +interface exchange_nod + module procedure exchange_nod2D + module procedure exchange_nod2D_i + module procedure exchange_nod2D_2fields + module procedure exchange_nod2D_3fields + module procedure exchange_nod3D + module procedure exchange_nod3D_2fields + module procedure exchange_nod3D_n +end interface exchange_nod + 
+interface exchange_nod_begin + module procedure exchange_nod2D_begin + module procedure exchange_nod2D_i_begin + module procedure exchange_nod2D_2fields_begin + module procedure exchange_nod2D_3fields_begin + module procedure exchange_nod3D_begin + module procedure exchange_nod3D_2fields_begin + module procedure exchange_nod3D_n_begin +end interface exchange_nod_begin + +!!$interface exchange_edge +!!$ module procedure exchange_edge2D +!!$! module procedure exchange_edge3D ! not available, not used +!!$end interface exchange_edge + +interface exchange_elem + module procedure exchange_elem3D + module procedure exchange_elem3D_n + module procedure exchange_elem2d + module procedure exchange_elem2d_i +end interface exchange_elem + +interface exchange_elem_begin + module procedure exchange_elem3D_begin + module procedure exchange_elem3D_n_begin + module procedure exchange_elem2d_begin + module procedure exchange_elem2d_i_begin +end interface exchange_elem_begin + + +interface broadcast_nod + module procedure broadcast_nod3D + module procedure broadcast_nod2D +end interface broadcast_nod + +interface broadcast_elem + module procedure broadcast_elem3D + module procedure broadcast_elem2D +end interface broadcast_elem + +interface gather_nod + module procedure gather_nod3D + module procedure gather_nod2D + module procedure gather_real4_nod3D + module procedure gather_real4_nod2D + module procedure gather_int2_nod3D + module procedure gather_int2_nod2D + module procedure gather_real8to4_nod3D + module procedure gather_real8to4_nod2D + module procedure gather_nod2D_i +end interface gather_nod + +interface gather_elem + module procedure gather_elem3D + module procedure gather_elem2D + module procedure gather_real4_elem3D + module procedure gather_real4_elem2D + module procedure gather_int2_elem3D + module procedure gather_int2_elem2D + module procedure gather_real8to4_elem3D + module procedure gather_real8to4_elem2D + module procedure gather_elem2D_i +end interface 
gather_elem + +interface gather_edge + module procedure gather_edg2D + module procedure gather_edg2D_i +end interface gather_edge + + +private ! hides items not listed on public statement +public :: exchange_nod,exchange_elem,broadcast_nod,broadcast_elem, & + gather_nod, gather_elem, exchange_nod_begin, exchange_nod_end, exchange_elem_begin, & + exchange_elem_end, gather_edge +end module g_comm_auto diff --git a/src/temp/gen_modules_partitioning.F90 b/src/temp/gen_modules_partitioning.F90 new file mode 100644 index 000000000..cc7d3c080 --- /dev/null +++ b/src/temp/gen_modules_partitioning.F90 @@ -0,0 +1,508 @@ +module par_support_interfaces + interface + subroutine par_init(partit) + USE o_PARAM + USE MOD_PARTIT + implicit none + type(t_partit), intent(inout), target :: partit + end subroutine + + subroutine par_ex(partit, abort) + USE MOD_PARTIT + implicit none + type(t_partit), intent(inout), target :: partit + integer,optional :: abort + end subroutine + + subroutine set_par_support(partit, mesh) + use MOD_MESH + use MOD_PARTIT + implicit none + type(t_partit), intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + end subroutine + + subroutine init_gatherLists(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + end subroutine + end interface +end module + +subroutine par_init(partit) ! 
initializes MPI + USE o_PARAM + USE MOD_PARTIT + implicit none + type(t_partit), intent(inout), target :: partit + integer :: i + integer :: provided_mpi_thread_support_level + character(:), allocatable :: provided_mpi_thread_support_level_name + +#ifndef __oasis + call MPI_Comm_Size(MPI_COMM_WORLD,partit%npes,i) + call MPI_Comm_Rank(MPI_COMM_WORLD,partit%mype,i) + partit%MPI_COMM_FESOM=MPI_COMM_WORLD +#else + call MPI_Comm_Size(MPI_COMM_FESOM,partit%npes,i) + call MPI_Comm_Rank(MPI_COMM_FESOM,partit%mype,i) +#endif + + if(partit%mype==0) then + call MPI_Query_thread(provided_mpi_thread_support_level, i) + if(provided_mpi_thread_support_level == MPI_THREAD_SINGLE) then + provided_mpi_thread_support_level_name = "MPI_THREAD_SINGLE" + else if(provided_mpi_thread_support_level == MPI_THREAD_FUNNELED) then + provided_mpi_thread_support_level_name = "MPI_THREAD_FUNNELED" + else if(provided_mpi_thread_support_level == MPI_THREAD_SERIALIZED) then + provided_mpi_thread_support_level_name = "MPI_THREAD_SERIALIZED" + else if(provided_mpi_thread_support_level == MPI_THREAD_MULTIPLE) then + provided_mpi_thread_support_level_name = "MPI_THREAD_MULTIPLE" + else + provided_mpi_thread_support_level_name = "unknown" + end if + write(*,*) 'MPI has been initialized, provided MPI thread support level: ', & + provided_mpi_thread_support_level_name,provided_mpi_thread_support_level + write(*, *) 'Running on ', partit%npes, ' PEs' + end if +end subroutine par_init +!================================================================= +subroutine par_ex(partit, abort) ! finalizes MPI +USE MOD_PARTIT +#ifndef __oifs +!For standalone and coupled ECHAM runs +#if defined (__oasis) + use mod_prism +#endif + implicit none + type(t_partit), intent(inout), target :: partit + integer,optional :: abort + +#ifndef __oasis + if (present(abort)) then + if (partit%mype==0) write(*,*) 'Run finished unexpectedly!' 
+ call MPI_ABORT(partit%MPI_COMM_FESOM, 1 ) + else + call MPI_Barrier(partit%MPI_COMM_FESOM,partit%MPIerr) + call MPI_Finalize(partit%MPIerr) + endif +#else + if (.not. present(abort)) then + if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling prism_terminate' + call MPI_Barrier(MPI_COMM_WORLD, partit%MPIerr) + end if + call prism_terminate_proto(MPIerr) + if (partit%mype==0) print *, 'FESOM calls MPI_Barrier before calling MPI_Finalize' + call MPI_Barrier(MPI_COMM_WORLD, partit%MPIerr) + + if (partit%mype==0) print *, 'FESOM calls MPI_Finalize' + call MPI_Finalize(MPIerr) +#endif + if (partit%mype==0) print *, 'fesom should stop with exit status = 0' +#endif +#if defined (__oifs) +!OIFS coupling doesnt call prism_terminate_proto and uses MPI_COMM_FESOM + implicit none + integer,optional :: abort + if (present(abort)) then + if (partit%mype==0) write(*,*) 'Run finished unexpectedly!' + call MPI_ABORT( partit%MPI_COMM_FESOM, 1 ) + else + call MPI_Barrier(partit%MPI_COMM_FESOM,partit%MPIerr) + call MPI_Finalize(partit%MPIerr) + endif +#endif + +end subroutine par_ex +!======================================================================= +subroutine set_par_support(partit, mesh) + use MOD_MESH + use MOD_PARTIT + implicit none + + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + integer :: n, offset + integer :: i, max_nb, nb, nini, nend, nl1, n_val + integer, allocatable :: blocklen(:), displace(:) + integer, allocatable :: blocklen_tmp(:), displace_tmp(:) + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + ! + ! In the distributed memory version, most of the job is already done + ! at the initialization phase and is taken into account in read_mesh + ! routine. Here, MPI datatypes are built and buffers for MPI wait requests + ! are allocated. + + if (npes > 1) then + +!================================================ +! 
MPI REQUEST BUFFERS +!================================================ + if (.not. allocated(com_nod2D%req)) allocate(com_nod2D%req( 3*com_nod2D%rPEnum + 3*com_nod2D%sPEnum)) + if (.not. allocated(com_elem2D%req)) allocate(com_elem2D%req( 3*com_elem2D%rPEnum + 3*com_elem2D%sPEnum)) + if (.not. allocated(com_elem2D_full%req)) allocate(com_elem2D_full%req(3*com_elem2D_full%rPEnum + 3*com_elem2D_full%sPEnum)) +!================================================ +! MPI DATATYPES +!================================================ + ! Build MPI Data types for halo exchange: Elements + allocate(partit%r_mpitype_elem2D(com_elem2D%rPEnum,4)) ! 2D, small halo + allocate(partit%s_mpitype_elem2D(com_elem2D%sPEnum,4)) + allocate(partit%r_mpitype_elem2D_full_i(com_elem2D_full%rPEnum)) ! 2D, wide halo, integer + allocate(partit%s_mpitype_elem2D_full_i(com_elem2D_full%sPEnum)) + allocate(partit%r_mpitype_elem2D_full(com_elem2D_full%rPEnum,4)) ! 2D, wide halo + allocate(partit%s_mpitype_elem2D_full(com_elem2D_full%sPEnum,4)) + allocate(partit%r_mpitype_elem3D(com_elem2D%rPEnum, nl-1:nl,4)) ! 3D, small halo + allocate(partit%s_mpitype_elem3D(com_elem2D%sPEnum, nl-1:nl,4)) + allocate(partit%r_mpitype_elem3D_full(com_elem2D_full%rPEnum, nl-1:nl,4)) ! 3D, wide halo + allocate(partit%s_mpitype_elem3D_full(com_elem2D_full%sPEnum, nl-1:nl,4)) +!after the allocation we just reassotiate ALL pointers again here +#include "associate_part_ass.h" + ! 
Upper limit for the length of the local interface between the neighbor PEs + max_nb = max( & + maxval(com_elem2D%rptr(2:com_elem2D%rPEnum+1) - com_elem2D%rptr(1:com_elem2D%rPEnum)), & + maxval(com_elem2D%sptr(2:com_elem2D%sPEnum+1) - com_elem2D%sptr(1:com_elem2D%sPEnum)), & + maxval(com_elem2D_full%rptr(2:com_elem2D_full%rPEnum+1) - com_elem2D_full%rptr(1:com_elem2D_full%rPEnum)), & + maxval(com_elem2D_full%sptr(2:com_elem2D_full%sPEnum+1) - com_elem2D_full%sptr(1:com_elem2D_full%sPEnum))) + + allocate(displace(max_nb), blocklen(max_nb)) + allocate(displace_tmp(max_nb), blocklen_tmp(max_nb)) + + + do n=1,com_elem2D%rPEnum + nb = 1 + nini = com_elem2D%rptr(n) + nend = com_elem2D%rptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_elem2D%rlist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_elem2D%rlist(i) /= com_elem2D%rlist(i-1) + 1) then + ! New block + nb = nb+1 + displace(nb) = com_elem2D%rlist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + DO n_val=1,4 + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val + displace_tmp(1:nb) = displace(1:nb)*n_val + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + r_mpitype_elem2D(n,n_val), MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_elem2D(n,n_val), MPIerr) + + DO nl1=nl-1, nl + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + r_mpitype_elem3D(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_elem3D(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + do n=1,com_elem2D%sPEnum + nb = 1 + nini = com_elem2D%sptr(n) + nend = com_elem2D%sptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_elem2D%slist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_elem2D%slist(i) /= com_elem2D%slist(i-1) + 1) then + ! 
New block + nb = nb+1 + displace(nb) = com_elem2D%slist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + DO n_val=1,4 + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val + displace_tmp(1:nb) = displace(1:nb)*n_val + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + s_mpitype_elem2D(n, n_val), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_elem2D(n, n_val), MPIerr) + + DO nl1=nl-1, nl + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + s_mpitype_elem3D(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_elem3D(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + do n=1,com_elem2D_full%rPEnum + nb = 1 + nini = com_elem2D_full%rptr(n) + nend = com_elem2D_full%rptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_elem2D_full%rlist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_elem2D_full%rlist(i) /= com_elem2D_full%rlist(i-1) + 1) then + ! 
New block + nb = nb+1 + displace(nb) = com_elem2D_full%rlist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + call MPI_TYPE_INDEXED(nb, blocklen,displace,MPI_INTEGER, r_mpitype_elem2D_full_i(n),MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_elem2D_full_i(n), MPIerr) + + DO n_val=1,4 + + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_DOUBLE_PRECISION, & + r_mpitype_elem2D_full(n,n_val), MPIerr) + call MPI_TYPE_COMMIT(r_mpitype_elem2D_full(n, n_val), MPIerr) + + DO nl1=nl-1, nl + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + r_mpitype_elem3D_full(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_elem3D_full(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + do n=1,com_elem2D_full%sPEnum + nb = 1 + nini = com_elem2D_full%sptr(n) + nend = com_elem2D_full%sptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_elem2D_full%slist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_elem2D_full%slist(i) /= com_elem2D_full%slist(i-1) + 1) then + ! 
New block + nb = nb+1 + displace(nb) = com_elem2D_full%slist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + call MPI_TYPE_INDEXED(nb, blocklen,displace,MPI_INTEGER, s_mpitype_elem2D_full_i(n), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_elem2D_full_i(n), MPIerr) + + DO n_val=1,4 + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_DOUBLE_PRECISION, & + s_mpitype_elem2D_full(n,n_val),MPIerr) + call MPI_TYPE_COMMIT(s_mpitype_elem2D_full(n,n_val), MPIerr) + + DO nl1=nl-1, nl + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + s_mpitype_elem3D_full(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_elem3D_full(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + deallocate(displace, blocklen) + deallocate(displace_tmp, blocklen_tmp) + + + ! Build MPI Data types for halo exchange: Nodes + + allocate(partit%r_mpitype_nod2D(com_nod2D%rPEnum)) ! 2D + allocate(partit%s_mpitype_nod2D(com_nod2D%sPEnum)) + allocate(partit%r_mpitype_nod2D_i(com_nod2D%rPEnum)) ! 2D integer + allocate(partit%s_mpitype_nod2D_i(com_nod2D%sPEnum)) + + allocate(partit%r_mpitype_nod3D(com_nod2D%rPEnum,nl-1:nl,3)) ! 3D with nl-1 or nl layers, 1-3 values + allocate(partit%s_mpitype_nod3D(com_nod2D%sPEnum,nl-1:nl,3)) +!after the allocation we just reassotiate ALL pointers again here +#include "associate_part_ass.h" + + ! Upper limit for the length of the local interface between the neighbor PEs + max_nb = max(maxval(com_nod2D%rptr(2:com_nod2D%rPEnum+1) - com_nod2D%rptr(1:com_nod2D%rPEnum)), & + maxval(com_nod2D%sptr(2:com_nod2D%sPEnum+1) - com_nod2D%sptr(1:com_nod2D%sPEnum))) + + allocate(displace(max_nb), blocklen(max_nb)) + allocate(displace_tmp(max_nb), blocklen_tmp(max_nb)) + + do n=1,com_nod2D%rPEnum + nb = 1 + nini = com_nod2D%rptr(n) + nend = com_nod2D%rptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_nod2D%rlist(nini) -1 ! 
C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_nod2D%rlist(i) /= com_nod2D%rlist(i-1) + 1) then + ! New block + nb = nb+1 + displace(nb) = com_nod2D%rlist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_DOUBLE_PRECISION, & + r_mpitype_nod2D(n), MPIerr) + + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_INTEGER, & + r_mpitype_nod2D_i(n), MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_nod2D(n), MPIerr) + call MPI_TYPE_COMMIT(r_mpitype_nod2D_i(n), MPIerr) + + DO nl1=nl-1, nl + DO n_val=1,3 + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + r_mpitype_nod3D(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(r_mpitype_nod3D(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + do n=1,com_nod2D%sPEnum + nb = 1 + nini = com_nod2D%sptr(n) + nend = com_nod2D%sptr(n+1) - 1 + displace(:) = 0 + displace(1) = com_nod2D%slist(nini) -1 ! C counting, start at 0 + blocklen(:) = 1 + do i=nini+1, nend + if (com_nod2D%slist(i) /= com_nod2D%slist(i-1) + 1) then + ! 
New block + nb = nb+1 + displace(nb) = com_nod2D%slist(i) -1 + else + blocklen(nb) = blocklen(nb)+1 + endif + enddo + + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_DOUBLE_PRECISION, & + s_mpitype_nod2D(n), MPIerr) + + call MPI_TYPE_INDEXED(nb, blocklen, displace, MPI_INTEGER, & + s_mpitype_nod2D_i(n), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_nod2D(n), MPIerr) + call MPI_TYPE_COMMIT(s_mpitype_nod2D_i(n), MPIerr) + + DO nl1=nl-1, nl + DO n_val=1,3 + + blocklen_tmp(1:nb) = blocklen(1:nb)*n_val*nl1 + displace_tmp(1:nb) = displace(1:nb)*n_val*nl1 + + call MPI_TYPE_INDEXED(nb, blocklen_tmp, displace_tmp, MPI_DOUBLE_PRECISION, & + s_mpitype_nod3D(n,nl1,n_val), MPIerr) + + call MPI_TYPE_COMMIT(s_mpitype_nod3D(n,nl1,n_val), MPIerr) + ENDDO + ENDDO + enddo + + deallocate(blocklen, displace) + deallocate(blocklen_tmp, displace_tmp) + + endif + + call init_gatherLists(partit, mesh) + if(mype==0) write(*,*) 'Communication arrays are set' +end subroutine set_par_support + + +!=================================================================== +subroutine init_gatherLists(partit, mesh) + USE MOD_MESH + USE MOD_PARTIT + implicit none + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + integer :: n2D, e2D, sum_loc_elem2D + integer :: n, estart, nstart +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + if (mype==0) then + + if (npes > 1) then + + allocate(partit%remPtr_nod2D(npes)) + allocate(partit%remPtr_elem2D(npes)) +!reassociate the pointers to the just allocated arrays +#include "associate_part_ass.h" + remPtr_nod2D(1) = 1 + remPtr_elem2D(1) = 1 + + do n=1, npes-1 + call MPI_RECV(n2D, 1, MPI_INTEGER, n, 0, MPI_COMM_FESOM, MPI_STATUS_IGNORE, MPIerr ) + call MPI_RECV(e2D, 1, MPI_INTEGER, n, 1, MPI_COMM_FESOM, MPI_STATUS_IGNORE, MPIerr ) + + remPtr_nod2D(n+1) = remPtr_nod2D(n) + n2D + remPtr_elem2D(n+1) = remPtr_elem2D(n) + e2D + enddo + + 
allocate(partit%remList_nod2D(remPtr_nod2D(npes))) ! this should be nod2D - myDim_nod2D + allocate(partit%remList_elem2D(remPtr_elem2D(npes))) ! this is > elem2D, because the elements overlap. + ! Consider optimization: avoid multiple communication + ! of the same elem from different PEs. +!reassociate the pointers to the just allocated arrays +#include "associate_part_ass.h" + + do n=1, npes-1 + nstart = remPtr_nod2D(n) + n2D = remPtr_nod2D(n+1) - remPtr_nod2D(n) + call MPI_RECV(remList_nod2D(nstart), n2D, MPI_INTEGER, n, 2, MPI_COMM_FESOM, & + MPI_STATUS_IGNORE, MPIerr ) + estart = remPtr_elem2D(n) + e2D = remPtr_elem2D(n+1) - remPtr_elem2D(n) + call MPI_RECV(remList_elem2D(estart),e2D, MPI_INTEGER, n, 3, MPI_COMM_FESOM, & + MPI_STATUS_IGNORE, MPIerr ) + + enddo + end if + else + + call MPI_SEND(myDim_nod2D, 1, MPI_INTEGER, 0, 0, MPI_COMM_FESOM, MPIerr ) + call MPI_SEND(myDim_elem2D, 1, MPI_INTEGER, 0, 1, MPI_COMM_FESOM, MPIerr ) + call MPI_SEND(myList_nod2D, myDim_nod2D, MPI_INTEGER, 0, 2, MPI_COMM_FESOM, MPIerr ) + call MPI_SEND(myList_elem2D, myDim_elem2D, MPI_INTEGER, 0, 3, MPI_COMM_FESOM, MPIerr ) + + endif +end subroutine init_gatherLists diff --git a/src/temp/oce_adv_tra_driver.F90 b/src/temp/oce_adv_tra_driver.F90 new file mode 100644 index 000000000..511e903b7 --- /dev/null +++ b/src/temp/oce_adv_tra_driver.F90 @@ -0,0 +1,278 @@ +module oce_adv_tra_driver_interfaces + interface + subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + integer, intent(in) :: tr_num + type(t_partit), intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + type(t_tracer), intent(inout), target :: tracers + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WI(mesh%nl, 
partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in), target :: WE(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + end subroutine + end interface +end module + +module oce_tra_adv_flux2dtracer_interface + interface + subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, mesh, use_lo, ttf, lo) + !update the solution for vertical and horizontal flux contributions + use MOD_MESH + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: flux_v(mesh%nl, partit%myDim_nod2D) + logical, optional :: use_lo + real(kind=WP), optional :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), optional :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + end subroutine + end interface +end module +! +! 
!===============================================================================
! Driver for one tracer-advection step of tracer tr_num.
! Computes the FCT low-order solution (if requested), then the horizontal and
! vertical high-order fluxes, applies FCT limiting, and accumulates the flux
! divergences into the tracer work arrays (del_ttf_advhoriz / del_ttf_advvert).
!
! Arguments:
!   dt      -- time step [s]
!   vel     -- horizontal velocity on elements (2, nl-1, elem2D)
!   w       -- full vertical velocity at nodes (nl, nod2D)
!   wi      -- implicit part of vertical velocity (w_split option)
!   we      -- explicit part of vertical velocity
!   tr_num  -- index of the tracer in tracers%data
!   tracers -- tracer container (fields + FCT work arrays), updated in place
!   partit  -- parallel partitioning info (halo exchanges)
!   mesh    -- mesh description
subroutine do_oce_adv_tra(dt, vel, w, wi, we, tr_num, tracers, partit, mesh)
    use MOD_MESH
    use MOD_TRACER
    use MOD_PARTIT
    use g_comm_auto
    use oce_adv_tra_hor_interfaces
    use oce_adv_tra_ver_interfaces
    use oce_adv_tra_fct_interfaces
    use oce_tra_adv_flux2dtracer_interface
    implicit none
    real(kind=WP), intent(in), target :: dt
    integer, intent(in) :: tr_num
    type(t_partit), intent(inout), target :: partit
    type(t_mesh), intent(in), target :: mesh
    type(t_tracer), intent(inout), target :: tracers
    real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D)
    real(kind=WP), intent(in), target :: W(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D)
    real(kind=WP), intent(in), target :: WI(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D)
    real(kind=WP), intent(in), target :: WE(mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D)

    real(kind=WP), pointer, dimension (:,:) :: pwvel
    real(kind=WP), pointer, dimension (:,:) :: ttf, ttfAB, fct_LO
    real(kind=WP), pointer, dimension (:,:) :: adv_flux_hor, adv_flux_ver, dttf_h, dttf_v
    real(kind=WP), pointer, dimension (:,:) :: fct_ttf_min, fct_ttf_max
    real(kind=WP), pointer, dimension (:,:) :: fct_plus, fct_minus

    integer, pointer, dimension (:) :: nboundary_lay
    real(kind=WP), pointer, dimension (:,:,:) :: edge_up_dn_grad

    integer :: el(2), enodes(2), nz, n, e
    integer :: nl12, nu12, nl1, nl2, nu1, nu2
    real(kind=WP) :: cLO, cHO, deltaX1, deltaY1, deltaX2, deltaY2
    real(kind=WP) :: qc, qu, qd
    real(kind=WP) :: tvert(mesh%nl), tvert_e(mesh%nl), a, b, c, d, da, db, dg, vflux, Tupw1
    real(kind=WP) :: Tmean, Tmean1, Tmean2, num_ord
    real(kind=WP) :: opth, optv
    logical :: do_zero_flux

#include "associate_part_def.h"
#include "associate_mesh_def.h"
#include "associate_part_ass.h"
#include "associate_mesh_ass.h"
    ! Shorthand pointers into the tracer container and its work arrays
    ttf             => tracers%data(tr_num)%values
    ttfAB           => tracers%data(tr_num)%valuesAB
    opth            = tracers%data(tr_num)%tra_adv_ph
    optv            = tracers%data(tr_num)%tra_adv_pv
    fct_LO          => tracers%work%fct_LO
    adv_flux_ver    => tracers%work%adv_flux_ver
    adv_flux_hor    => tracers%work%adv_flux_hor
    edge_up_dn_grad => tracers%work%edge_up_dn_grad
    nboundary_lay   => tracers%work%nboundary_lay
    fct_ttf_min     => tracers%work%fct_ttf_min
    fct_ttf_max     => tracers%work%fct_ttf_max
    fct_plus        => tracers%work%fct_plus
    fct_minus       => tracers%work%fct_minus
    dttf_h          => tracers%work%del_ttf_advhoriz
    dttf_v          => tracers%work%del_ttf_advvert
    !___________________________________________________________________________
    ! compute FCT horizontal and vertical low order solution as well as low order
    ! part of antidiffusive flux
    if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then
        ! compute the low order upwind horizontal flux
        ! init_zero=.true.  : zero the horizontal flux before computation
        ! init_zero=.false. : input flux will be subtracted
        call adv_tra_hor_upw1(vel, ttf, partit, mesh, adv_flux_hor, init_zero=.true.)
        ! update the LO solution for horizontal contribution
        fct_LO=0.0_WP
        do e=1, myDim_edge2D
            enodes=edges(:,e)
            el=edge_tri(:,e)
            nl1=nlevels(el(1))-1
            nu1=ulevels(el(1))
            nl2=0
            nu2=0
            if(el(2)>0) then
                nl2=nlevels(el(2))-1
                nu2=ulevels(el(2))
            end if

            nl12 = max(nl1,nl2)
            nu12 = nu1
            if (nu2>0) nu12 = min(nu1,nu2)

            !!PS do nz=1, max(nl1, nl2)
            do nz=nu12, nl12
                fct_LO(nz, enodes(1))=fct_LO(nz, enodes(1))+adv_flux_hor(nz, e)
                fct_LO(nz, enodes(2))=fct_LO(nz, enodes(2))-adv_flux_hor(nz, e)
            end do
        end do
        ! compute the low order upwind vertical flux (explicit part only)
        ! zero the input/output flux before computation
        call adv_tra_ver_upw1(we, ttf, partit, mesh, adv_flux_ver, init_zero=.true.)
        ! update the LO solution for vertical contribution
        do n=1, myDim_nod2D
            nu1 = ulevels_nod2D(n)
            nl1 = nlevels_nod2D(n)
            !!PS do nz=1, nlevels_nod2D(n)-1
            do nz= nu1, nl1-1
                fct_LO(nz,n)=(ttf(nz,n)*hnode(nz,n)+(fct_LO(nz,n)+(adv_flux_ver(nz, n)-adv_flux_ver(nz+1, n)))*dt/areasvol(nz,n))/hnode_new(nz,n)
            end do
        end do
        if (w_split) then !wvel/=wvel_e
            ! update for implicit contribution (w_split option)
            call adv_tra_vert_impl(dt, wi, fct_LO, partit, mesh)
            ! compute the low order upwind vertical flux (full vertical velocity)
            ! zero the input/output flux before computation
            ! --> compute here low order part of vertical anti diffusive fluxes,
            !     has to be done on the full vertical velocity w
            call adv_tra_ver_upw1(w, ttf, partit, mesh, adv_flux_ver, init_zero=.true.)
        end if
        call exchange_nod(fct_LO, partit)
    end if

    ! with FCT the LO flux is already in the flux arrays and must be subtracted
    ! by the high-order routines (init_zero=.false.)
    do_zero_flux=.true.
    if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') do_zero_flux=.false.
    !___________________________________________________________________________
    ! do horizontal tracer advection, in case of FCT high order solution
    SELECT CASE(trim(tracers%data(tr_num)%tra_adv_hor))
    CASE('MUSCL')
        ! compute the antidiffusive horizontal flux (init_zero=.false.: input is the LO horizontal flux computed above)
        call adv_tra_hor_muscl(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, nboundary_lay, init_zero=do_zero_flux)
    CASE('MFCT')
        call adv_tra_hor_mfct(vel, ttfAB, partit, mesh, opth, adv_flux_hor, edge_up_dn_grad, init_zero=do_zero_flux)
    CASE('UPW1')
        call adv_tra_hor_upw1(vel, ttfAB, partit, mesh, adv_flux_hor, init_zero=do_zero_flux)
    CASE DEFAULT !unknown
        if (mype==0) write(*,*) 'Unknown horizontal advection type ', trim(tracers%data(tr_num)%tra_adv_hor), '! Check your namelists!'
        call par_ex(partit, 1)
    END SELECT
    ! with FCT the antidiffusive vertical flux must be built on the full
    ! vertical velocity; otherwise only the explicit part is advected here
    if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then
        pwvel=>w
    else
        pwvel=>we
    end if
    !___________________________________________________________________________
    ! do vertical tracer advection, in case of FCT high order solution
    SELECT CASE(trim(tracers%data(tr_num)%tra_adv_ver))
    CASE('QR4C')
        ! compute the antidiffusive vertical flux (init_zero=.false.: input is the LO vertical flux computed above)
        call adv_tra_ver_qr4c ( pwvel, ttfAB, partit, mesh, optv, adv_flux_ver, init_zero=do_zero_flux)
    CASE('CDIFF')
        call adv_tra_ver_cdiff( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux)
    CASE('PPM')
        call adv_tra_vert_ppm(dt, pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux)
    CASE('UPW1')
        call adv_tra_ver_upw1 ( pwvel, ttfAB, partit, mesh, adv_flux_ver, init_zero=do_zero_flux)
    CASE DEFAULT !unknown
        if (mype==0) write(*,*) 'Unknown vertical advection type ', trim(tracers%data(tr_num)%tra_adv_ver), '! Check your namelists!'
        ! FIX: pass partit as in the horizontal CASE DEFAULT above; the
        ! one-argument form was inconsistent with every other par_ex call here
        call par_ex(partit, 1)
        ! --> be aware the vertical implicit part in case without FCT is done in
        !     oce_ale_tracer.F90 --> subroutine diff_ver_part_impl_ale(tr_num, partit, mesh)
        !     for do_wimpl=.true.
    END SELECT
    !___________________________________________________________________________
    ! apply FCT limiting (if requested) and fold the limited fluxes into the
    ! tracer increments
    if (trim(tracers%data(tr_num)%tra_adv_lim)=='FCT') then
        ! edge_up_dn_grad will be used as an auxiliary array here
        call oce_tra_adv_fct(dt, ttf, fct_LO, adv_flux_hor, adv_flux_ver, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, edge_up_dn_grad, partit, mesh)
        call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, partit, mesh, use_lo=.TRUE., ttf=ttf, lo=fct_LO)
    else
        call oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, adv_flux_hor, adv_flux_ver, partit, mesh)
    end if
end subroutine do_oce_adv_tra
!
!
+!=============================================================================== +subroutine oce_tra_adv_flux2dtracer(dt, dttf_h, dttf_v, flux_h, flux_v, partit, mesh, use_lo, ttf, lo) + use MOD_MESH + use o_ARRAYS + use MOD_PARTIT + use g_comm_auto + implicit none + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: dttf_h(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: dttf_v(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: flux_v(mesh%nl, partit%myDim_nod2D) + logical, optional :: use_lo + real(kind=WP), optional :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), optional :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + integer :: n, nz, k, elem, enodes(3), num, el(2), nu12, nl12, nu1, nu2, nl1, nl2, edge +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + !___________________________________________________________________________ + ! c. Update the solution + ! Vertical + if (present(use_lo)) then + if (use_lo) then + do n=1, myDim_nod2d + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + !!PS do nz=1,nlevels_nod2D(n)-1 + do nz=nu1, nl1-1 + dttf_v(nz,n)=dttf_v(nz,n)-ttf(nz,n)*hnode(nz,n)+LO(nz,n)*hnode_new(nz,n) + end do + end do + end if + end if + + do n=1, myDim_nod2d + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 + dttf_v(nz,n)=dttf_v(nz,n) + (flux_v(nz,n)-flux_v(nz+1,n))*dt/areasvol(nz,n) + end do + end do + + + ! 
Horizontal + do edge=1, myDim_edge2D + enodes(1:2)=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + nu1=ulevels(el(1)) + + nl2=0 + nu2=0 + if(el(2)>0) then + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + end if + + nl12 = max(nl1,nl2) + nu12 = nu1 + if (nu2>0) nu12 = min(nu1,nu2) + + !!PS do nz=1, max(nl1, nl2) + do nz=nu12, nl12 + dttf_h(nz,enodes(1))=dttf_h(nz,enodes(1))+flux_h(nz,edge)*dt/areasvol(nz,enodes(1)) + dttf_h(nz,enodes(2))=dttf_h(nz,enodes(2))-flux_h(nz,edge)*dt/areasvol(nz,enodes(2)) + end do + end do +end subroutine oce_tra_adv_flux2dtracer diff --git a/src/temp/oce_adv_tra_fct.F90 b/src/temp/oce_adv_tra_fct.F90 new file mode 100644 index 000000000..5eb7993a9 --- /dev/null +++ b/src/temp/oce_adv_tra_fct.F90 @@ -0,0 +1,365 @@ +module oce_adv_tra_fct_interfaces + interface + subroutine oce_adv_tra_fct_init(twork, partit, mesh) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + type(t_mesh), intent(in), target :: mesh + type(t_partit),intent(inout), target :: partit + type(t_tracer_work), intent(inout), target :: twork + end subroutine + + subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, partit, mesh) + use MOD_MESH + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(inout), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: adf_v(mesh%nl, partit%myDim_nod2D) + real(kind=WP), intent(inout) :: fct_plus(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: fct_minus(mesh%nl, 
partit%myDim_nod2D) + real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array + end subroutine + end interface +end module +! +! +!=============================================================================== +subroutine oce_adv_tra_fct_init(twork, partit, mesh) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + implicit none + integer :: my_size + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer_work), intent(inout), target :: twork +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + my_size=myDim_nod2D+eDim_nod2D + allocate(twork%fct_LO(nl-1, my_size)) ! Low-order solution + allocate(twork%adv_flux_hor(nl-1,partit%myDim_edge2D)) ! antidiffusive hor. contributions / from edges + allocate(twork%adv_flux_ver(nl, partit%myDim_nod2D)) ! antidiffusive ver. fluxes / from nodes + + allocate(twork%fct_ttf_max(nl-1, my_size),twork%fct_ttf_min(nl-1, my_size)) + allocate(twork%fct_plus(nl-1, my_size), twork%fct_minus(nl-1, my_size)) + ! Initialize with zeros: + twork%fct_LO=0.0_WP + twork%adv_flux_hor=0.0_WP + twork%adv_flux_ver=0.0_WP + twork%fct_ttf_max=0.0_WP + twork%fct_ttf_min=0.0_WP + twork%fct_plus=0.0_WP + twork%fct_minus=0.0_WP + + if (mype==0) write(*,*) 'FCT is initialized' +end subroutine oce_adv_tra_fct_init + +! +! +!=============================================================================== +subroutine oce_tra_adv_fct(dt, ttf, lo, adf_h, adf_v, fct_ttf_min, fct_ttf_max, fct_plus, fct_minus, AUX, partit, mesh) + ! + ! 3D Flux Corrected Transport scheme + ! Limits antidiffusive fluxes==the difference in flux HO-LO + ! LO ==Low-order (first-order upwind) + ! HO ==High-order (3rd/4th order gradient reconstruction method) + ! 
Adds limited fluxes to the LO solution + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + implicit none + real(kind=WP), intent(in), target :: dt + type(t_mesh), intent(in), target :: mesh + type(t_partit),intent(inout), target :: partit + real(kind=WP), intent(inout) :: fct_ttf_min(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_ttf_max(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: lo (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: adf_h(mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(inout) :: adf_v(mesh%nl, partit%myDim_nod2D) + real(kind=WP), intent(inout) :: fct_plus (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: fct_minus(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: AUX(:,:,:) !a large auxuary array, let us use twork%edge_up_dn_grad(1:4, 1:NL-2, 1:partit%myDim_edge2D) to save space + integer :: n, nz, k, elem, enodes(3), num, el(2), nl1, nl2, nu1, nu2, nl12, nu12, edge + real(kind=WP) :: flux, ae,tvert_max(mesh%nl-1),tvert_min(mesh%nl-1) + real(kind=WP) :: flux_eps=1e-16 + real(kind=WP) :: bignumber=1e3 + integer :: vlimit=1 + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + ! -------------------------------------------------------------------------- + ! ttf is the tracer field on step n + ! del_ttf is the increment + ! vlimit sets the version of limiting, see below + ! -------------------------------------------------------------------------- + !___________________________________________________________________________ + ! a1. 
max, min between old solution and updated low-order solution per node + do n=1,myDim_nod2D + edim_nod2d + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1, nl1-1 + fct_ttf_max(nz,n)=max(LO(nz,n), ttf(nz,n)) + fct_ttf_min(nz,n)=min(LO(nz,n), ttf(nz,n)) + end do + end do + + !___________________________________________________________________________ + ! a2. Admissible increments on elements + ! (only layers below the first and above the last layer) + ! look for max, min bounds for each element --> AUX here auxilary array + do elem=1, myDim_elem2D + enodes=elem2D_nodes(:,elem) + nu1 = ulevels(elem) + nl1 = nlevels(elem) + do nz=nu1, nl1-1 + AUX(1,nz,elem)=maxval(fct_ttf_max(nz,enodes)) + AUX(2,nz,elem)=minval(fct_ttf_min(nz,enodes)) + end do + if (nl1<=nl-1) then + do nz=nl1,nl-1 + AUX(1,nz,elem)=-bignumber + AUX(2,nz,elem)= bignumber + end do + endif + end do ! --> do elem=1, myDim_elem2D + + !___________________________________________________________________________ + ! a3. Bounds on clusters and admissible increments + ! Vertical1: In this version we look at the bounds on the clusters + ! above and below, which leaves wide bounds because typically + ! vertical gradients are larger. + if(vlimit==1) then + !Horizontal + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + + !___________________________________________________________________ + do nz=nu1,nl1-1 + ! max,min horizontal bound in cluster around node n in every + ! vertical layer + ! nod_in_elem2D --> elem indices of which node n is surrounded + ! nod_in_elem2D_num --> max number of surrounded elem + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + end do + + !___________________________________________________________________ + ! calc max,min increment of surface layer with respect to low order + ! 
solution + fct_ttf_max(nu1,n)=tvert_max(nu1)-LO(nu1,n) + fct_ttf_min(nu1,n)=tvert_min(nu1)-LO(nu1,n) + + ! calc max,min increment from nz-1:nz+1 with respect to low order + ! solution at layer nz + do nz=nu1+1,nl1-2 + fct_ttf_max(nz,n)=maxval(tvert_max(nz-1:nz+1))-LO(nz,n) + fct_ttf_min(nz,n)=minval(tvert_min(nz-1:nz+1))-LO(nz,n) + end do + ! calc max,min increment of bottom layer -1 with respect to low order + ! solution + nz=nl1-1 + fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) + fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) + end do + end if + + !___________________________________________________________________________ + ! Vertical2: Similar to the version above, but the vertical bounds are more + ! local + if(vlimit==2) then + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + end do + do nz=nu1+1, nl1-2 + tvert_max(nz)=max(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) + tvert_min(nz)=min(tvert_min(nz),minval(fct_ttf_max(nz-1:nz+1,n))) + end do + do nz=nu1,nl1-1 + fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) + fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) + end do + end do + end if + + !___________________________________________________________________________ + ! Vertical3: Vertical bounds are taken into account only if they are narrower than the + ! 
horizontal ones + if(vlimit==3) then + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1, nl1-1 + tvert_max(nz)= maxval(AUX(1,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + tvert_min(nz)= minval(AUX(2,nz,nod_in_elem2D(1:nod_in_elem2D_num(n),n))) + end do + do nz=nu1+1, nl1-2 + tvert_max(nz)=min(tvert_max(nz),maxval(fct_ttf_max(nz-1:nz+1,n))) + tvert_min(nz)=max(tvert_min(nz),minval(fct_ttf_max(nz-1:nz+1,n))) + end do + do nz=nu1, nl1-1 + fct_ttf_max(nz,n)=tvert_max(nz)-LO(nz,n) + fct_ttf_min(nz,n)=tvert_min(nz)-LO(nz,n) + end do + end do + end if + + !___________________________________________________________________________ + ! b1. Split positive and negative antidiffusive contributions + ! --> sum all positive (fct_plus), negative (fct_minus) antidiffusive + ! horizontal element and vertical node contribution to node n and layer nz + ! see. R. Löhner et al. "finite element flux corrected transport (FEM-FCT) + ! for the euler and navier stoke equation + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 + fct_plus(nz,n)=0._WP + fct_minus(nz,n)=0._WP + end do + end do + + !Vertical + do n=1, myDim_nod2D + nu1 = ulevels_nod2D(n) + nl1 = nlevels_nod2D(n) + do nz=nu1,nl1-1 +! fct_plus(nz,n)=fct_plus(nz,n)+ & +! (max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) & +! /hnode(nz,n) +! fct_minus(nz,n)=fct_minus(nz,n)+ & +! (min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) & +! 
/hnode(nz,n) + fct_plus(nz,n) =fct_plus(nz,n) +(max(0.0_WP,adf_v(nz,n))+max(0.0_WP,-adf_v(nz+1,n))) + fct_minus(nz,n)=fct_minus(nz,n)+(min(0.0_WP,adf_v(nz,n))+min(0.0_WP,-adf_v(nz+1,n))) + end do + end do + + !Horizontal + do edge=1, myDim_edge2D + enodes(1:2)=edges(:,edge) + el=edge_tri(:,edge) + nl1=nlevels(el(1))-1 + nu1=ulevels(el(1)) + nl2=0 + nu2=0 + if(el(2)>0) then + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + end if + + nl12 = max(nl1,nl2) + nu12 = nu1 + if (nu2>0) nu12 = min(nu1,nu2) + + do nz=nu12, nl12 + fct_plus (nz,enodes(1))=fct_plus (nz,enodes(1)) + max(0.0_WP, adf_h(nz,edge)) + fct_minus(nz,enodes(1))=fct_minus(nz,enodes(1)) + min(0.0_WP, adf_h(nz,edge)) + fct_plus (nz,enodes(2))=fct_plus (nz,enodes(2)) + max(0.0_WP,-adf_h(nz,edge)) + fct_minus(nz,enodes(2))=fct_minus(nz,enodes(2)) + min(0.0_WP,-adf_h(nz,edge)) + end do + end do + + !___________________________________________________________________________ + ! b2. Limiting factors + do n=1,myDim_nod2D + nu1=ulevels_nod2D(n) + nl1=nlevels_nod2D(n) + do nz=nu1,nl1-1 + flux=fct_plus(nz,n)*dt/areasvol(nz,n)+flux_eps + fct_plus(nz,n)=min(1.0_WP,fct_ttf_max(nz,n)/flux) + flux=fct_minus(nz,n)*dt/areasvol(nz,n)-flux_eps + fct_minus(nz,n)=min(1.0_WP,fct_ttf_min(nz,n)/flux) + end do + end do + + ! fct_minus and fct_plus must be known to neighbouring PE + call exchange_nod(fct_plus, fct_minus, partit) + + !___________________________________________________________________________ + ! b3. 
Limiting + !Vertical + do n=1, myDim_nod2D + nu1=ulevels_nod2D(n) + nl1=nlevels_nod2D(n) + + !_______________________________________________________________________ + nz=nu1 + ae=1.0_WP + flux=adf_v(nz,n) + if(flux>=0.0_WP) then + ae=min(ae,fct_plus(nz,n)) + else + ae=min(ae,fct_minus(nz,n)) + end if + adf_v(nz,n)=ae*adf_v(nz,n) + + !_______________________________________________________________________ + do nz=nu1+1,nl1-1 + ae=1.0_WP + flux=adf_v(nz,n) + if(flux>=0._WP) then + ae=min(ae,fct_minus(nz-1,n)) + ae=min(ae,fct_plus(nz,n)) + else + ae=min(ae,fct_plus(nz-1,n)) + ae=min(ae,fct_minus(nz,n)) + end if + adf_v(nz,n)=ae*adf_v(nz,n) + end do + ! the bottom flux is always zero + end do + + call exchange_nod_end(partit) ! fct_plus, fct_minus + + !Horizontal + do edge=1, myDim_edge2D + enodes(1:2)=edges(:,edge) + el=edge_tri(:,edge) + nu1=ulevels(el(1)) + nl1=nlevels(el(1))-1 + nl2=0 + nu2=0 + if(el(2)>0) then + nu2=ulevels(el(2)) + nl2=nlevels(el(2))-1 + end if + + nl12 = max(nl1,nl2) + nu12 = nu1 + if (nu2>0) nu12 = min(nu1,nu2) + + do nz=nu12, nl12 + ae=1.0_WP + flux=adf_h(nz,edge) + + if(flux>=0._WP) then + ae=min(ae,fct_plus(nz,enodes(1))) + ae=min(ae,fct_minus(nz,enodes(2))) + else + ae=min(ae,fct_minus(nz,enodes(1))) + ae=min(ae,fct_plus(nz,enodes(2))) + endif + + adf_h(nz,edge)=ae*adf_h(nz,edge) + end do + end do +end subroutine oce_tra_adv_fct diff --git a/src/temp/oce_adv_tra_hor.F90 b/src/temp/oce_adv_tra_hor.F90 new file mode 100644 index 000000000..714eccf68 --- /dev/null +++ b/src/temp/oce_adv_tra_hor.F90 @@ -0,0 +1,739 @@ +!=============================================================================================================================== +!**************** routines for horizontal tracer advection *********************** +module oce_adv_tra_hor_interfaces + interface +! (low order upwind) +! returns flux given at edges which contributes with +! plus sign into 1st. node and with the minus sign into the 2nd node +! IF init_zero=.TRUE. 
: flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! flux is not multiplied with dt + subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + end subroutine +!=============================================================================== +! MUSCL +! returns flux given at edges which contributes with +! plus sign into 1st. node and with the minus sign into the 2nd node +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! flux is not multiplied with dt + subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) + use MOD_MESH + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + end subroutine +! a not stable version of MUSCL (reconstruction in the vicinity of bottom topography is not upwind) +! 
it runs with FCT option only + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) + use MOD_MESH + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + end subroutine + end interface +end module +! +! +!=============================================================================== +subroutine adv_tra_hor_upw1(vel, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_PARTIT + use g_comm_auto + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 + real(kind=WP) :: a, vflux + integer :: el(2), enodes(2), nz, edge + integer :: nu12, nl12, nl1, nl2, nu1, nu2 + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + ! The result is the low-order solution horizontal fluxes + ! They are put into flux + !___________________________________________________________________________ + do edge=1, myDim_edge2D + ! 
local indice of nodes that span up edge ed + enodes=edges(:,edge) + + ! local index of element that contribute to edge + el=edge_tri(:,edge) + + ! number of layers -1 at elem el(1) + nl1=nlevels(el(1))-1 + + ! index off surface layer in case of cavity !=1 + nu1=ulevels(el(1)) + + ! edge_cross_dxdy(1:2,ed)... dx,dy distance from element centroid el(1) to + ! center of edge --> needed to calc flux perpedicular to edge from elem el(1) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + a=r_earth*elem_cos(el(1)) + + !_______________________________________________________________________ + ! same parameter but for other element el(2) that contributes to edge ed + ! if el(2)==0 than edge is boundary edge + nl2=0 + nu2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + ! number of layers -1 at elem el(2) + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + a=0.5_WP*(a+r_earth*elem_cos(el(2))) + end if + + !_______________________________________________________________________ + ! nl12 ... minimum number of layers -1 between element el(1) & el(2) that + ! contribute to edge ed + ! nu12 ... upper index of layers between element el(1) & el(2) that + ! contribute to edge ed + ! be carefull !!! --> if ed is a boundary edge than el(1)~=0 and el(2)==0 + ! that means nl1>0, nl2==0, n2=min(nl1,nl2)=0 !!! + nl12=min(nl1,nl2) + nu12=max(nu1,nu2) + + !_______________________________________________________________________ + ! (A) goes only into this loop when the edge has only facing element + ! el(1) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + do nz=nu1, nu12-1 + !____________________________________________________________________ + ! volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + + !____________________________________________________________________ + ! 1st. 
low order upwind solution + flux(nz, edge)=-0.5_WP*( & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)) & + )-flux(nz, edge) + end do + + !_______________________________________________________________________ + ! (B) goes only into this loop when the edge has only facing elemenmt + ! el(2) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + if (nu2 > 0) then + do nz=nu2, nu12-1 + !___________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + + !___________________________________________________________ + ! 1st. low order upwind solution + flux(nz, edge)=-0.5_WP*( & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) + end do + end if + + !_______________________________________________________________________ + ! (C) Both segments + ! loop over depth layers from top (nu12) to nl12 + ! be carefull !!! --> if ed is a boundary edge, el(2)==0 than nl12=0 so + ! you wont enter in this loop + do nz=nu12, nl12 + !___________________________________________________________________ + ! 1st. low order upwind solution + ! here already assumed that ed is NOT! a boundary edge so el(2) should exist + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) & + +(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + + flux(nz, edge)=-0.5_WP*( & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) + end do + + !_______________________________________________________________________ + ! (D) remaining segments on the left or on the right + do nz=nl12+1, nl1 + !____________________________________________________________________ + ! 
volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + !____________________________________________________________________ + ! 1st. low order upwind solution + flux(nz, edge)=-0.5_WP*( & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)) & + )-flux(nz, edge) + end do + + !_______________________________________________________________________ + ! (E) remaining segments on the left or on the right + do nz=nl12+1, nl2 + !_______________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + !_______________________________________________________________ + ! 1st. low order upwind solution + flux(nz, edge)=-0.5_WP*( & + ttf(nz, enodes(1))*(vflux+abs(vflux))+ & + ttf(nz, enodes(2))*(vflux-abs(vflux)))-flux(nz, edge) + end do + end do +end subroutine adv_tra_hor_upw1 +! +! +!=============================================================================== +subroutine adv_tra_hor_muscl(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, nboundary_lay, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! 
num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + integer, intent(in) :: nboundary_lay(partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 + real(kind=WP) :: Tmean1, Tmean2, cHO + real(kind=WP) :: c_lo(2) + real(kind=WP) :: a, vflux + integer :: el(2), enodes(2), nz, edge + integer :: nu12, nl12, nl1, nl2, nu1, nu2 + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + ! The result is the low-order solution horizontal fluxes + ! They are put into flux + !___________________________________________________________________________ + do edge=1, myDim_edge2D + ! local indice of nodes that span up edge ed + enodes=edges(:,edge) + + ! local index of element that contribute to edge + el=edge_tri(:,edge) + + ! number of layers -1 at elem el(1) + nl1=nlevels(el(1))-1 + + ! index off surface layer in case of cavity !=1 + nu1=ulevels(el(1)) + + ! edge_cross_dxdy(1:2,ed)... dx,dy distance from element centroid el(1) to + ! center of edge --> needed to calc flux perpedicular to edge from elem el(1) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + a=r_earth*elem_cos(el(1)) + + !_______________________________________________________________________ + ! same parameter but for other element el(2) that contributes to edge ed + ! if el(2)==0 than edge is boundary edge + nl2=0 + nu2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + ! 
number of layers -1 at elem el(2) + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + a=0.5_WP*(a+r_earth*elem_cos(el(2))) + end if + + !_______________________________________________________________________ + ! n2 ... minimum number of layers -1 between element el(1) & el(2) that + ! contribute to edge ed + ! nu12 ... upper index of layers between element el(1) & el(2) that + ! contribute to edge ed + ! be carefull !!! --> if ed is a boundary edge than el(1)~=0 and el(2)==0 + ! that means nl1>0, nl2==0, n2=min(nl1,nl2)=0 !!! + nl12=min(nl1,nl2) + nu12=max(nu1,nu2) + + !_______________________________________________________________________ + ! (A) goes only into this loop when the edge has only facing element + ! el(1) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + do nz=nu1, nu12-1 + c_lo(1)=real(max(sign(1, nboundary_lay(enodes(1))-nz), 0),WP) + c_lo(2)=real(max(sign(1, nboundary_lay(enodes(2))-nz), 0),WP) + + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP*c_lo(2) + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP*c_lo(1) + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (B) goes only into this loop when the edge has only facing elemenmt + ! 
el(2) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + if (nu2 > 0) then + do nz=nu2, nu12-1 + c_lo(1)=real(max(sign(1, nboundary_lay(enodes(1))-nz), 0),WP) + c_lo(2)=real(max(sign(1, nboundary_lay(enodes(2))-nz), 0),WP) + + !_______________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP*c_lo(2) + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP*c_lo(1) + + !_______________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + end if + + !_______________________________________________________________________ + ! (C) Both segments + ! loop over depth layers from top to n2 + ! be carefull !!! --> if ed is a boundary edge, el(2)==0 than n2=0 so + ! you wont enter in this loop + do nz=nu12, nl12 + c_lo(1)=real(max(sign(1, nboundary_lay(enodes(1))-nz), 0),WP) + c_lo(2)=real(max(sign(1, nboundary_lay(enodes(2))-nz), 0),WP) + + !___________________________________________________________________ + ! MUSCL-type reconstruction + ! check if upwind or downwind triagle is necessary + ! + ! cross product between velocity vector and cross vector edge-elem-center + ! cross product > 0 --> angle vec_v and (dx,dy) --> [0 180] --> upwind triangle + ! cross product < 0 --> angle vec_v and (dx,dy) --> [180 360] --> downwind triangle + ! + ! o o ! o o + ! / \ / \ ! / \ / \ + ! / \ \ vec_v / \ ! / \ / / \ + ! / up \ \ / dn \ ! 
/ up \ / / dn \ + ! o-------o----+---->o-------o ! o-------o----+---->o-------o + ! 1 / 2 ! 1 \vec_v + ! /vec_v ! \ + ! --> downwind triangle ! --> upwind triangle + ! + ! edge_up_dn_grad(1,nz,edge) ... gradTR_x upwind + ! edge_up_dn_grad(2,nz,edge) ... gradTR_x downwind + ! edge_up_dn_grad(3,nz,edge) ... gradTR_y upwind + ! edge_up_dn_grad(4,nz,edge) ... gradTR_y downwind + + !___________________________________________________________________ + ! use downwind triangle to interpolate Tracer to edge center with + ! fancy scheme --> Linear upwind reconstruction + ! T_n+0.5 = T_n+1 - 1/2*deltax*GRADIENT + ! --> GRADIENT = 2/3 GRAD_edgecenter + 1/3 GRAD_downwindtri + ! T_n+0.5 = T_n+1 - 2/6*(T_n+1-T_n) + 1/6*gradT_down + ! --> edge_up_dn_grad ... contains already elemental tracer gradient + ! of up and dn wind triangle + ! --> Tmean2 ... edge center interpolated Tracer using tracer + ! gradient info from upwind triangle + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP*c_lo(2) + + ! use upwind triangle to interpolate Tracer to edge center with + ! fancy scheme --> Linear upwind reconstruction + ! T_n+0.5 = T_n + 1/2*deltax*GRADIENT + ! --> GRADIENT = 2/3 GRAD_edgecenter + 1/3 GRAD_downwindtri + ! T_n+0.5 = T_n + 2/6*(T_n+1-T_n) + 1/6*gradT_down + ! --> Tmean1 ... edge center interpolated Tracer using tracer + ! gradient info from downwind triangle + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP*c_lo(1) + + !___________________________________________________________________ + ! volume flux along the edge segment ed + ! netto volume flux along segment that comes from edge node 1 and 2 + ! + ! + ! C1 (centroid el(1)) --> (u1,v1) + ! x + ! ^ + ! (dx1,dy1) | + ! 
|---> vec_n1 (dy1,-dx1)--> project vec_u1 onto vec_n1 --> -v1*dx1+u1*dy1 --> + ! | | + ! enodes(1) o----------O---------o enodes(2) |-> calculate volume flux out of/in + ! vflux_________/| | the volume of enode1(enode2) through + ! |---> vec_n2 (dy2,-dx2)--> project vec_u2 onto vec_n2 --> -v2*dx2+u2*dy2 --> sections of dx1,dy1 and dx2,dy2 + ! (dx2,dy2) | --> vflux + ! v + ! x + ! C2 (centroid el(2)) --> (u2,v2) + + ! here already assumed that ed is NOT! a boundary edge so el(2) should exist + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) & + +(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + + !___________________________________________________________________ + ! (1-num_ord) is done with 3rd order upwind + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (D) remaining segments on the left or on the right + do nz=nl12+1, nl1 + c_lo(1)=real(max(sign(1, nboundary_lay(enodes(1))-nz), 0),WP) + c_lo(2)=real(max(sign(1, nboundary_lay(enodes(2))-nz), 0),WP) + + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP*c_lo(2) + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP*c_lo(1) + + !____________________________________________________________________ + ! 
volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (E) remaining segments on the left or on the right + do nz=nl12+1, nl2 + c_lo(1)=real(max(sign(1, nboundary_lay(enodes(1))-nz), 0),WP) + c_lo(2)=real(max(sign(1, nboundary_lay(enodes(2))-nz), 0),WP) + + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP*c_lo(2) + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP*c_lo(1) + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + end do +end subroutine adv_tra_hor_muscl +! +! +!=============================================================================== + subroutine adv_tra_hor_mfct(vel, ttf, partit, mesh, num_ord, flux, edge_up_dn_grad, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! 
num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf( mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: vel(2, mesh%nl-1, partit%myDim_elem2D+partit%eDim_elem2D) + real(kind=WP), intent(inout) :: flux( mesh%nl-1, partit%myDim_edge2D) + real(kind=WP), intent(in) :: edge_up_dn_grad(4, mesh%nl-1, partit%myDim_edge2D) + logical, optional :: init_zero + real(kind=WP) :: deltaX1, deltaY1, deltaX2, deltaY2 + real(kind=WP) :: Tmean1, Tmean2, cHO + real(kind=WP) :: a, vflux + integer :: el(2), enodes(2), nz, edge + integer :: nu12, nl12, nl1, nl2, nu1, nu2 + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + ! The result is the low-order solution horizontal fluxes + ! They are put into flux + !___________________________________________________________________________ + do edge=1, myDim_edge2D + ! local indice of nodes that span up edge ed + enodes=edges(:,edge) + + ! local index of element that contribute to edge + el=edge_tri(:,edge) + + ! number of layers -1 at elem el(1) + nl1=nlevels(el(1))-1 + + ! index off surface layer in case of cavity !=1 + nu1=ulevels(el(1)) + + ! edge_cross_dxdy(1:2,ed)... dx,dy distance from element centroid el(1) to + ! center of edge --> needed to calc flux perpedicular to edge from elem el(1) + deltaX1=edge_cross_dxdy(1,edge) + deltaY1=edge_cross_dxdy(2,edge) + a=r_earth*elem_cos(el(1)) + + !_______________________________________________________________________ + ! same parameter but for other element el(2) that contributes to edge ed + ! if el(2)==0 than edge is boundary edge + nl2=0 + nu2=0 + if(el(2)>0) then + deltaX2=edge_cross_dxdy(3,edge) + deltaY2=edge_cross_dxdy(4,edge) + ! 
number of layers -1 at elem el(2) + nl2=nlevels(el(2))-1 + nu2=ulevels(el(2)) + a=0.5_WP*(a+r_earth*elem_cos(el(2))) + end if + + !_______________________________________________________________________ + ! n2 ... minimum number of layers -1 between element el(1) & el(2) that + ! contribute to edge ed + ! nu12 ... upper index of layers between element el(1) & el(2) that + ! contribute to edge ed + ! be carefull !!! --> if ed is a boundary edge than el(1)~=0 and el(2)==0 + ! that means nl1>0, nl2==0, n2=min(nl1,nl2)=0 !!! + nl12=min(nl1,nl2) + nu12=max(nu1,nu2) + + !_______________________________________________________________________ + ! (A) goes only into this loop when the edge has only facing element + ! el(1) --> so the edge is a boundary edge --> this is for ocean + ! surface in case of cavity + do nz=nu1, nu12-1 + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (B) goes only into this loop when the edge has only facing elemenmt + ! el(2) --> so the edge is a boundary edge --> this is for ocean + ! 
surface in case of cavity + if (nu2 > 0) then + do nz=nu2,nu12-1 + !___________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP + !___________________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + end if + + !_______________________________________________________________________ + ! (C) Both segments + ! loop over depth layers from top to n2 + ! be carefull !!! --> if ed is a boundary edge, el(2)==0 than n2=0 so + ! you wont enter in this loop + do nz=nu12, nl12 + !___________________________________________________________________ + ! MUSCL-type reconstruction + ! check if upwind or downwind triagle is necessary + ! + ! cross product between velocity vector and cross vector edge-elem-center + ! cross product > 0 --> angle vec_v and (dx,dy) --> [0 180] --> upwind triangle + ! cross product < 0 --> angle vec_v and (dx,dy) --> [180 360] --> downwind triangle + ! + ! o o ! o o + ! / \ / \ ! / \ / \ + ! / \ \ vec_v / \ ! / \ / / \ + ! / up \ \ / dn \ ! / up \ / / dn \ + ! o-------o----+---->o-------o ! o-------o----+---->o-------o + ! 1 / 2 ! 1 \vec_v + ! /vec_v ! \ + ! --> downwind triangle ! --> upwind triangle + ! + ! edge_up_dn_grad(1,nz,edge) ... gradTR_x upwind + ! edge_up_dn_grad(2,nz,edge) ... gradTR_x downwind + ! edge_up_dn_grad(3,nz,edge) ... gradTR_y upwind + ! 
edge_up_dn_grad(4,nz,edge) ... gradTR_y downwind + + !___________________________________________________________________ + ! use downwind triangle to interpolate Tracer to edge center with + ! fancy scheme --> Linear upwind reconstruction + ! T_n+0.5 = T_n+1 - 1/2*deltax*GRADIENT + ! --> GRADIENT = 2/3 GRAD_edgecenter + 1/3 GRAD_downwindtri + ! T_n+0.5 = T_n+1 - 2/6*(T_n+1-T_n) + 1/6*gradT_down + ! --> edge_up_dn_grad ... contains already elemental tracer gradient + ! of up and dn wind triangle + ! --> Tmean2 ... edge center interpolated Tracer using tracer + ! gradient info from upwind triangle + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP + + ! use upwind triangle to interpolate Tracer to edge center with + ! fancy scheme --> Linear upwind reconstruction + ! T_n+0.5 = T_n + 1/2*deltax*GRADIENT + ! --> GRADIENT = 2/3 GRAD_edgecenter + 1/3 GRAD_downwindtri + ! T_n+0.5 = T_n + 2/6*(T_n+1-T_n) + 1/6*gradT_down + ! --> Tmean1 ... edge center interpolated Tracer using tracer + ! gradient info from downwind triangle + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP + !___________________________________________________________________ + ! volume flux along the edge segment ed + ! netto volume flux along segment that comes from edge node 1 and 2 + ! + ! + ! C1 (centroid el(1)) --> (u1,v1) + ! x + ! ^ + ! (dx1,dy1) | + ! |---> vec_n1 (dy1,-dx1)--> project vec_u1 onto vec_n1 --> -v1*dx1+u1*dy1 --> + ! | | + ! enodes(1) o----------O---------o enodes(2) |-> calculate volume flux out of/in + ! vflux_________/| | the volume of enode1(enode2) through + ! |---> vec_n2 (dy2,-dx2)--> project vec_u2 onto vec_n2 --> -v2*dx2+u2*dy2 --> sections of dx1,dy1 and dx2,dy2 + ! 
(dx2,dy2) | --> vflux + ! v + ! x + ! C2 (centroid el(2)) --> (u2,v2) + + ! here already assumed that ed is NOT! a boundary edge so el(2) should exist + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) & + +(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + + !___________________________________________________________________ + ! (1-num_ord) is done with 3rd order upwind + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! (D) remaining segments on the left or on the right + do nz=nl12+1, nl1 + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(-VEL(2,nz,el(1))*deltaX1 + VEL(1,nz,el(1))*deltaY1)*helem(nz,el(1)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + + !_______________________________________________________________________ + ! 
(E) remaining segments on the left or on the right + do nz=nl12+1, nl2 + !____________________________________________________________________ + Tmean2=ttf(nz, enodes(2))- & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(2,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(4,nz,edge))/6.0_WP + + Tmean1=ttf(nz, enodes(1))+ & + (2.0_WP*(ttf(nz, enodes(2))-ttf(nz,enodes(1)))+ & + edge_dxdy(1,edge)*a*edge_up_dn_grad(1,nz,edge)+ & + edge_dxdy(2,edge)*r_earth*edge_up_dn_grad(3,nz,edge))/6.0_WP + + !____________________________________________________________________ + ! volume flux across the segments + vflux=(VEL(2,nz,el(2))*deltaX2 - VEL(1,nz,el(2))*deltaY2)*helem(nz,el(2)) + cHO=(vflux+abs(vflux))*Tmean1 + (vflux-abs(vflux))*Tmean2 + flux(nz,edge)=-0.5_WP*(1.0_WP-num_ord)*cHO - vflux*num_ord*0.5_WP*(Tmean1+Tmean2)-flux(nz,edge) + end do + end do +end subroutine adv_tra_hor_mfct + diff --git a/src/temp/oce_adv_tra_ver.F90 b/src/temp/oce_adv_tra_ver.F90 new file mode 100644 index 000000000..eab9847a8 --- /dev/null +++ b/src/temp/oce_adv_tra_ver.F90 @@ -0,0 +1,598 @@ +module oce_adv_tra_ver_interfaces + interface +! implicit 1st order upwind vertical advection with to solve for fct_LO +! updates the input tracer ttf + subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) + use mod_mesh + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(inout) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + end subroutine +!=============================================================================== +! 1st order upwind (explicit) +! returns flux given at vertical interfaces of scalar volumes +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! 
flux is not multiplied with dt + subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + end subroutine +!=============================================================================== +! QR (4th order centerd) +! returns flux given at vertical interfaces of scalar volumes +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! flux is not multiplied with dt + subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) + use MOD_MESH + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + end subroutine +!=============================================================================== +! Vertical advection with PPM reconstruction (5th order) +! returns flux given at vertical interfaces of scalar volumes +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! 
flux is not multiplied with dt + subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_PARTIT + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + integer :: n, nz, nl1 + real(kind=WP) :: tvert(mesh%nl), tv + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + end subroutine +! central difference reconstruction (2nd order, use only with FCT) +! returns flux given at vertical interfaces of scalar volumes +! IF init_zero=.TRUE. : flux will be set to zero before computation +! IF init_zero=.FALSE. : flux=flux-input flux +! flux is not multiplied with dt + subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_PARTIT + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + integer :: n, nz, nl1 + real(kind=WP) :: tvert(mesh%nl), tv + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + end subroutine + end interface +end module +!=============================================================================== +subroutine adv_tra_vert_impl(dt, w, ttf, partit, mesh) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + + implicit none + real(kind=WP), intent(in) , target :: dt + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in) , target :: mesh + real(kind=WP), intent(inout) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP) :: 
a(mesh%nl), b(mesh%nl), c(mesh%nl), tr(mesh%nl) + real(kind=WP) :: cp(mesh%nl), tp(mesh%nl) + integer :: nz, n, nzmax, nzmin, tr_num + real(kind=WP) :: m, zinv, dt_inv, dz + real(kind=WP) :: c1, v_adv + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + dt_inv=1.0_WP/dt + + !___________________________________________________________________________ + ! loop over local nodes + do n=1,myDim_nod2D + + ! initialise + a = 0.0_WP + b = 0.0_WP + c = 0.0_WP + tr = 0.0_WP + tp = 0.0_WP + cp = 0.0_WP + + ! max. number of levels at node n + nzmax=nlevels_nod2D(n) + + ! upper surface index, in case of cavity !=1 + nzmin=ulevels_nod2D(n) + + !___________________________________________________________________________ + ! Here can not exchange zbar_n & Z_n with zbar_3d_n & Z_3d_n because + ! they be calculate from the actualized mesh with hnode_new + ! calculate new zbar (depth of layers) and Z (mid depths of layers) + ! depending on layer thinkness over depth at node n + ! Be carefull here vertical operation have to be done on NEW vertical mesh !!! + zbar_n=0.0_WP + Z_n=0.0_WP + zbar_n(nzmax)=zbar_n_bot(n) + Z_n(nzmax-1) =zbar_n(nzmax) + hnode_new(nzmax-1,n)/2.0_WP + do nz=nzmax-1,nzmin+1,-1 + zbar_n(nz) = zbar_n(nz+1) + hnode_new(nz,n) + Z_n(nz-1) = zbar_n(nz) + hnode_new(nz-1,n)/2.0_WP + end do + zbar_n(nzmin) = zbar_n(nzmin+1) + hnode_new(nzmin,n) + + !_______________________________________________________________________ + ! Regular part of coefficients: --> surface layer + nz=nzmin + + ! 1/dz(nz) + zinv=1.0_WP*dt ! 
no .../(zbar(1)-zbar(2)) because of ALE + + !!PS a(nz)=0.0_WP + !!PS v_adv=zinv*areasvol(nz+1,n)/areasvol(nz,n) + !!PS b(nz)= hnode_new(nz,n)+W(nz, n)*zinv-min(0._WP, W(nz+1, n))*v_adv + !!PS c(nz)=-max(0._WP, W(nz+1, n))*v_adv + + a(nz)=0.0_WP + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + b(nz)= hnode_new(nz,n)+W(nz, n)*v_adv + + v_adv=zinv*area(nz+1,n)/areasvol(nz,n) + b(nz)= b(nz)-min(0._WP, W(nz+1, n))*v_adv + c(nz)=-max(0._WP, W(nz+1, n))*v_adv + + !_______________________________________________________________________ + ! Regular part of coefficients: --> 2nd...nl-2 layer + do nz=nzmin+1, nzmax-2 + ! update from the vertical advection + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + a(nz)=min(0._WP, W(nz, n))*v_adv + b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*v_adv + + v_adv=zinv*area(nz+1,n)/areasvol(nz,n) + b(nz)=b(nz)-min(0._WP, W(nz+1, n))*v_adv + c(nz)= -max(0._WP, W(nz+1, n))*v_adv + end do ! --> do nz=2, nzmax-2 + + !_______________________________________________________________________ + ! Regular part of coefficients: --> nl-1 layer + nz=nzmax-1 + ! update from the vertical advection + !!PS a(nz)= min(0._WP, W(nz, n))*zinv + !!PS b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*zinv + !!PS c(nz)=0.0_WP + v_adv=zinv*area(nz ,n)/areasvol(nz,n) + a(nz)= min(0._WP, W(nz, n))*v_adv + b(nz)=hnode_new(nz,n)+max(0._WP, W(nz, n))*v_adv + c(nz)=0.0_WP + + !_______________________________________________________________________ + nz=nzmin + dz=hnode_new(nz,n) ! It would be (zbar(nz)-zbar(nz+1)) if not ALE + tr(nz)=-(b(nz)-dz)*ttf(nz,n)-c(nz)*ttf(nz+1,n) + + do nz=nzmin+1,nzmax-2 + dz=hnode_new(nz,n) + tr(nz)=-a(nz)*ttf(nz-1,n)-(b(nz)-dz)*ttf(nz,n)-c(nz)*ttf(nz+1,n) + end do + nz=nzmax-1 + dz=hnode_new(nz,n) + tr(nz)=-a(nz)*ttf(nz-1,n)-(b(nz)-dz)*ttf(nz,n) + + !_______________________________________________________________________ + nz = nzmin + cp(nz) = c(nz)/b(nz) + tp(nz) = tr(nz)/b(nz) + + ! 
solve for vectors c-prime and t, s-prime + do nz = nzmin+1,nzmax-1 + m = b(nz)-cp(nz-1)*a(nz) + cp(nz) = c(nz)/m + tp(nz) = (tr(nz)-tp(nz-1)*a(nz))/m + end do + + !_______________________________________________________________________ + ! start with back substitution + tr(nzmax-1) = tp(nzmax-1) + + ! solve for x from the vectors c-prime and d-prime + do nz = nzmax-2, nzmin, -1 + tr(nz) = tp(nz)-cp(nz)*tr(nz+1) + end do + + !_______________________________________________________________________ + ! update tracer + do nz=nzmin,nzmax-1 + ttf(nz,n)=ttf(nz,n)+tr(nz) + end do + end do ! --> do n=1,myDim_nod2D +end subroutine adv_tra_vert_impl +! +! +!=============================================================================== +subroutine adv_tra_ver_upw1(w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP) :: tvert(mesh%nl) + integer :: n, nz, nzmax, nzmin + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + do n=1, myDim_nod2D + !_______________________________________________________________________ + nzmax=nlevels_nod2D(n) + nzmin=ulevels_nod2D(n) + + !_______________________________________________________________________ + ! vert. flux at surface layer + nz=nzmin + flux(nz,n)=-W(nz,n)*ttf(nz,n)*area(nz,n)-flux(nz,n) + + !_______________________________________________________________________ + ! vert. 
flux at bottom layer --> zero bottom flux + nz=nzmax + flux(nz,n)= 0.0_WP-flux(nz,n) + + !_______________________________________________________________________ + ! Be carefull have to do vertical tracer advection here on old vertical grid + ! also horizontal advection is done on old mesh (see helem contains old + ! mesh information) + !_______________________________________________________________________ + ! vert. flux at remaining levels + do nz=nzmin+1,nzmax-1 + flux(nz,n)=-0.5*( & + ttf(nz ,n)*(W(nz,n)+abs(W(nz,n)))+ & + ttf(nz-1,n)*(W(nz,n)-abs(W(nz,n))))*area(nz,n)-flux(nz,n) + end do + end do +end subroutine adv_tra_ver_upw1 +! +! +!=============================================================================== +subroutine adv_tra_ver_qr4c(w, ttf, partit, mesh, num_ord, flux, init_zero) + use MOD_MESH + use o_ARRAYS + use o_PARAM + use MOD_PARTIT + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: num_ord ! num_ord is the fraction of fourth-order contribution in the solution + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + real(kind=WP) :: tvert(mesh%nl) + integer :: n, nz, nzmax, nzmin + real(kind=WP) :: Tmean, Tmean1, Tmean2 + real(kind=WP) :: qc, qu, qd + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + do n=1, myDim_nod2D + !_______________________________________________________________________ + nzmax=nlevels_nod2D(n) + nzmin=ulevels_nod2D(n) + !_______________________________________________________________________ + ! vert. 
flux at surface layer + nz=nzmin + flux(nz,n)=-ttf(nz,n)*W(nz,n)*area(nz,n)-flux(nz,n) + + !_______________________________________________________________________ + ! vert. flux 2nd layer --> centered differences + nz=nzmin+1 + flux(nz,n)=-0.5_WP*(ttf(nz-1,n)+ttf(nz,n))*W(nz,n)*area(nz,n)-flux(nz,n) + + !_______________________________________________________________________ + ! vert. flux at bottom - 1 layer --> centered differences + nz=nzmax-1 + flux(nz,n)=-0.5_WP*(ttf(nz-1,n)+ttf(nz,n))*W(nz,n)*area(nz,n)-flux(nz,n) + + !_______________________________________________________________________ + ! vert. flux at bottom layer --> zero bottom flux + nz=nzmax + flux(nz,n)= 0.0_WP-flux(nz,n) + + !_______________________________________________________________________ + ! Be carefull have to do vertical tracer advection here on old vertical grid + ! also horizontal advection is done on old mesh (see helem contains old + ! mesh information) + !_______________________________________________________________________ + ! vert. flux at remaining levels + do nz=nzmin+2,nzmax-2 + !centered (4th order) + qc=(ttf(nz-1,n)-ttf(nz ,n))/(Z_3d_n(nz-1,n)-Z_3d_n(nz ,n)) + qu=(ttf(nz ,n)-ttf(nz+1,n))/(Z_3d_n(nz ,n)-Z_3d_n(nz+1,n)) + qd=(ttf(nz-2,n)-ttf(nz-1,n))/(Z_3d_n(nz-2,n)-Z_3d_n(nz-1,n)) + + Tmean1=ttf(nz ,n)+(2*qc+qu)*(zbar_3d_n(nz,n)-Z_3d_n(nz ,n))/3.0_WP + Tmean2=ttf(nz-1,n)+(2*qc+qd)*(zbar_3d_n(nz,n)-Z_3d_n(nz-1,n))/3.0_WP + Tmean =(W(nz,n)+abs(W(nz,n)))*Tmean1+(W(nz,n)-abs(W(nz,n)))*Tmean2 + ! flux(nz,n)=-0.5_WP*(num_ord*(Tmean1+Tmean2)*W(nz,n)+(1.0_WP-num_ord)*Tmean)*area(nz,n)-flux(nz,n) + flux(nz,n)=(-0.5_WP*(1.0_WP-num_ord)*Tmean - num_ord*(0.5_WP*(Tmean1+Tmean2))*W(nz,n))*area(nz,n)-flux(nz,n) + end do + end do +end subroutine adv_tra_ver_qr4c +! +! 
+!=============================================================================== +subroutine adv_tra_vert_ppm(dt, w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + implicit none + real(kind=WP), intent(in), target :: dt + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in) , target :: mesh + real(kind=WP), intent(in) :: ttf (mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + real(kind=WP) :: tvert(mesh%nl), tv(mesh%nl), aL, aR, aj, x + real(kind=WP) :: dzjm1, dzj, dzjp1, dzjp2, deltaj, deltajp1 + integer :: n, nz, nzmax, nzmin + integer :: overshoot_counter, counter + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + ! -------------------------------------------------------------------------- + ! Vertical advection + ! -------------------------------------------------------------------------- + ! A piecewise parabolic scheme for uniformly-spaced layers. + ! See Colella and Woodward, JCP, 1984, 174-201. It can be coded so as to to take + ! non-uniformity into account, but this is more cumbersome. This is the version for AB + ! time stepping + ! -------------------------------------------------------------------------- + overshoot_counter=0 + counter =0 + do n=1, myDim_nod2D + !_______________________________________________________________________ + !Interpolate to zbar...depth levels --> all quantities (tracer ...) are + ! calculated on mid depth levels + ! nzmax ... number of depth levels at node n + nzmax=nlevels_nod2D(n) + nzmin=ulevels_nod2D(n) + + ! tracer at surface level + tv(nzmin)=ttf(nzmin,n) + ! tracer at surface+1 level +! 
tv(2)=-ttf(1,n)*min(sign(1.0, W(2,n)), 0._WP)+ttf(2,n)*max(sign(1.0, W(2,n)), 0._WP) +! tv(3)=-ttf(2,n)*min(sign(1.0, W(3,n)), 0._WP)+ttf(3,n)*max(sign(1.0, W(3,n)), 0._WP) + tv(nzmin+1)=0.5*(ttf(nzmin, n)+ttf(nzmin+1,n)) + ! tacer at bottom-1 level + tv(nzmax-1)=-ttf(nzmax-2,n)*min(sign(1.0_wp, W(nzmax-1,n)), 0._WP)+ttf(nzmax-1,n)*max(sign(1.0_wp, W(nzmax-1,n)), 0._WP) +! tv(nzmax-1)=0.5_WP*(ttf(nzmax-2,n)+ttf(nzmax-1,n)) + ! tracer at bottom level + tv(nzmax)=ttf(nzmax-1,n) + + !_______________________________________________________________________ + ! calc tracer for surface+2 until depth-2 layer + ! see Colella and Woodward, JCP, 1984, 174-201 --> equation (1.9) + ! loop over layers (segments) + !!PS do nz=3, nzmax-3 + do nz=nzmin+1, nzmax-3 + !___________________________________________________________________ + ! for uniform spaced vertical grids --> piecewise parabolic method (ppm) + ! equation (1.9) + ! tv(nz)=(7.0_WP*(ttf(nz-1,n)+ttf(nz,n))-(ttf(nz-2,n)+ttf(nz+1,n)))/12.0_WP + + !___________________________________________________________________ + ! for non-uniformity spaced vertical grids --> piecewise parabolic + ! method (ppm) see see Colella and Woodward, JCP, 1984, 174-201 + ! --> full equation (1.6), (1.7) and (1.8) + dzjm1 = hnode_new(nz-1,n) + dzj = hnode_new(nz ,n) + dzjp1 = hnode_new(nz+1,n) + dzjp2 = hnode_new(nz+2,n) + ! Be carefull here vertical operation have to be done on NEW vertical mesh !!! + + !___________________________________________________________________ + ! equation (1.7) + ! --> Here deltaj is the average slope in the jth zone of the parabola + ! with zone averages a_(j-1) and a_j, a_(j+1) + ! --> a_j^n + deltaj = dzj/(dzjm1+dzj+dzjp1)* & + ( & + (2._WP*dzjm1+dzj )/(dzjp1+dzj)*(ttf(nz+1,n)-ttf(nz ,n)) + & + (dzj +2._WP*dzjp1)/(dzjm1+dzj)*(ttf(nz ,n)-ttf(nz-1,n)) & + ) + ! 
--> a_(j+1)^n + deltajp1 = dzjp1/(dzj+dzjp1+dzjp2)* & + ( & + (2._WP*dzj+dzjp1 )/(dzjp2+dzjp1)*(ttf(nz+2,n)-ttf(nz+1,n)) + & + (dzjp1+2._WP*dzjp2)/(dzj +dzjp1)*(ttf(nz+1,n)-ttf(nz ,n)) & + ) + !___________________________________________________________________ + ! condition (1.8) + ! --> This modification leads to a somewhat steeper representation of + ! discontinuities in the solution. It also guarantees that a_(j+0.5) + ! lies in the range of values defined by a_j; and a_(j+1); + if ( (ttf(nz+1,n)-ttf(nz ,n))*(ttf(nz ,n)-ttf(nz-1,n)) > 0._WP ) then + deltaj = min( abs(deltaj), & + 2._WP*abs(ttf(nz+1,n)-ttf(nz ,n)),& + 2._WP*abs(ttf(nz ,n)-ttf(nz-1,n)) & + )*sign(1.0_WP,deltaj) + else + deltaj = 0.0_WP + endif + if ( (ttf(nz+2,n)-ttf(nz+1,n))*(ttf(nz+1,n)-ttf(nz ,n)) > 0._WP ) then + deltajp1 = min( abs(deltajp1),& + 2._WP*abs(ttf(nz+2,n)-ttf(nz+1,n)),& + 2._WP*abs(ttf(nz+1,n)-ttf(nz,n)) & + )*sign(1.0_WP,deltajp1) + else + deltajp1 = 0.0_WP + endif + !___________________________________________________________________ + ! equation (1.6) + ! --> calcualte a_(j+0.5) + ! nz+1 is the interface betweel layers (segments) nz and nz+1 + tv(nz+1)= ttf(nz,n) & + + dzj/(dzj+dzjp1)*(ttf(nz+1,n)-ttf(nz,n)) & + + 1._WP/(dzjm1+dzj+dzjp1+dzjp2) * & + ( & + (2._WP*dzjp1*dzj)/(dzj+dzjp1)* & + ((dzjm1+dzj)/(2._WP*dzj+dzjp1) - (dzjp2+dzjp1)/(2._WP*dzjp1+dzj))*(ttf(nz+1,n)-ttf(nz,n)) & + - dzj*(dzjm1+dzj)/(2._WP*dzj+dzjp1)*deltajp1 & + + dzjp1*(dzjp1+dzjp2)/(dzj+2._WP*dzjp1)*deltaj & + ) + !tv(nz+1)=max(min(ttf(nz, n), ttf(nz+1, n)), min(max(ttf(nz, n), ttf(nz+1, n)), tv(nz+1))) + end do ! --> do nz=2,nzmax-3 + + tvert(1:nzmax)=0._WP + ! loop over layers (segments) + do nz=nzmin, nzmax-1 + if ((W(nz,n)<=0._WP) .AND. (W(nz+1,n)>=0._WP)) CYCLE + counter=counter+1 + aL=tv(nz) + aR=tv(nz+1) + if ((aR-ttf(nz, n))*(ttf(nz, n)-aL)<=0._WP) then + ! 
write(*,*) aL, ttf(nz, n), aR + overshoot_counter=overshoot_counter+1 + aL =ttf(nz, n) + aR =ttf(nz, n) + end if + if ((aR-aL)*(ttf(nz, n)-0.5_WP*(aL+aR))> (aR-aL)**2/6._WP) then + aL =3._WP*ttf(nz, n)-2._WP*aR + end if + if ((aR-aL)*(ttf(nz, n)-0.5_WP*(aR+aL))<-(aR-aL)**2/6._WP) then + aR =3._WP*ttf(nz, n)-2._WP*aL + end if + + dzj = hnode(nz,n) + aj=6.0_WP*(ttf(nz, n)-0.5_WP*(aL+aR)) + + if (W(nz,n)>0._WP) then + x=min(W(nz,n)*dt/dzj, 1._WP) + tvert(nz )=(-aL-0.5_WP*x*(aR-aL+(1._WP-2._WP/3._WP*x)*aj)) + tvert(nz )=tvert(nz) ! compute 2nd moment for DVD + tvert(nz )=tvert(nz)*area(nz,n)*W(nz,n) + end if + + if (W(nz+1,n)<0._WP) then + x=min(-W(nz+1,n)*dt/dzj, 1._WP) + tvert(nz+1)=(-aR+0.5_WP*x*(aR-aL-(1._WP-2._WP/3._WP*x)*aj)) + tvert(nz+1)=tvert(nz+1) ! compute 2nd moment for DVD + tvert(nz+1)=tvert(nz+1)*area(nz+1,n)*W(nz+1,n) + end if + end do + + !_______________________________________________________________________ + ! Surface flux + tvert(nzmin)= -tv(nzmin)*W(nzmin,n)*area(nzmin,n) + ! Zero bottom flux + tvert(nzmax)=0.0_WP + flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) + end do ! --> do n=1, myDim_nod2D +! if (mype==0) write(*,*) 'PPM overshoot statistics:', real(overshoot_counter)/real(counter) +end subroutine adv_tra_vert_ppm +! +! 
+!=============================================================================== +subroutine adv_tra_ver_cdiff(w, ttf, partit, mesh, flux, init_zero) + use MOD_MESH + use MOD_TRACER + use MOD_PARTIT + use g_comm_auto + implicit none + type(t_partit),intent(in), target :: partit + type(t_mesh), intent(in), target :: mesh + real(kind=WP), intent(in) :: ttf(mesh%nl-1, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(in) :: W (mesh%nl, partit%myDim_nod2D+partit%eDim_nod2D) + real(kind=WP), intent(inout) :: flux(mesh%nl, partit%myDim_nod2D) + logical, optional :: init_zero + integer :: n, nz, nzmax, nzmin + real(kind=WP) :: tvert(mesh%nl), tv +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + + if (present(init_zero))then + if (init_zero) flux=0.0_WP + else + flux=0.0_WP + end if + + do n=1, myDim_nod2D + !_______________________________________________________________________ + nzmax=nlevels_nod2D(n)-1 + nzmin=ulevels_nod2D(n) + + !_______________________________________________________________________ + ! Surface flux + tvert(nzmin)= -W(nzmin,n)*ttf(nzmin,n)*area(nzmin,n) + + !_______________________________________________________________________ + ! Zero bottom flux + tvert(nzmax+1)=0.0_WP + + !_______________________________________________________________________ + ! Other levels + do nz=nzmin+1, nzmax + tv=0.5_WP*(ttf(nz-1,n)+ttf(nz,n)) + tvert(nz)= -tv*W(nz,n)*area(nz,n) + end do + + !_______________________________________________________________________ + flux(nzmin:nzmax, n)=tvert(nzmin:nzmax)-flux(nzmin:nzmax, n) + end do ! --> do n=1, myDim_nod2D +end subroutine adv_tra_ver_cdiff diff --git a/src/temp/oce_modules.F90 b/src/temp/oce_modules.F90 new file mode 100755 index 000000000..3576ef01f --- /dev/null +++ b/src/temp/oce_modules.F90 @@ -0,0 +1,267 @@ + +! Modules of cell-vertex ocean model +! S. Danilov, 2012 (sergey.danilov@awi.de) +! 
SI units are used + +!========================================================== +MODULE o_PARAM +integer, parameter :: WP=8 ! Working precision +integer, parameter :: MAX_PATH=4096 ! Maximum file path length +integer :: mstep +real(kind=WP), parameter :: pi=3.14159265358979 +real(kind=WP), parameter :: rad=pi/180.0_WP +real(kind=WP), parameter :: density_0=1030.0_WP +real(kind=WP), parameter :: density_0_r=1.0_WP/density_0 ! [m^3/kg] +real(kind=WP), parameter :: g=9.81_WP +real(kind=WP), parameter :: r_earth=6367500.0_WP +real(kind=WP), parameter :: omega=2*pi/(3600.0_WP*24.0_WP) +real(kind=WP), parameter :: vcpw=4.2e6 ![J/m^3/K] water heat cap +real(kind=WP), parameter :: inv_vcpw = 1._WP / vcpw ! inverse, to replace divide by multiply +real(kind=WP), parameter :: small=1.0e-8 !small value +integer :: state_equation = 1 !1 - full equation of state, 0 - linear equation of state + +real(kind=WP) :: C_d= 0.0025_WP ! Bottom drag coefficient +real(kind=WP) :: kappa=0.4 !von Karman's constant +real(kind=WP) :: mix_coeff_PP=0.01_WP ! mixing coef for PP scheme +real(kind=WP) :: gamma0=0.01! [m/s], gamma0*len*dt is the background viscosity +real(kind=WP) :: gamma1=0.1! [non dim.], or computation of the flow aware viscosity +real(kind=WP) :: gamma2=10.! [s/m], is only used in easy backscatter option +real(kind=WP) :: Div_c =1.0_WP !modified Leith viscosity weight +real(kind=WP) :: Leith_c=1.0_WP !Leith viscosity weight. It needs vorticity! +real(kind=WP) :: easy_bs_return=1.0 !backscatter option only (how much to return) +real(kind=WP) :: A_ver=0.001_WP ! Vertical harm. visc. +integer :: visc_option=5 +logical :: uke_scaling=.true. 
+real(kind=WP) :: uke_scaling_factor=1._WP +real(kind=WP) :: rosb_dis=1._WP +integer :: smooth_back=2 +integer :: smooth_dis=2 +integer :: smooth_back_tend=4 +real(kind=WP) :: K_back=600._WP +real(kind=WP) :: c_back=0.1_8 +real(kind=WP) :: K_hor=10._WP +real(kind=WP) :: K_ver=0.00001_WP +real(kind=WP) :: scale_area=2.0e8 +real(kind=WP) :: surf_relax_T= 0.0_WP +real(kind=WP) :: surf_relax_S= 10.0_WP/(60*3600.0_WP*24) +logical :: balance_salt_water =.true. +real(kind=WP) :: clim_relax= 1.0_WP/(10*3600.0_WP*24) +real(kind=WP) :: clim_decay, clim_growth + ! set to 0.0 if no relaxation +logical :: ref_sss_local=.false. +real(kind=WP) :: ref_sss=34.7 +logical :: Fer_GM =.false. !flag for Ferrari et al. (2010) GM scheme +real(kind=WP) :: K_GM_max = 3000. +real(kind=WP) :: K_GM_min = 2.0 +integer :: K_GM_bvref = 2 ! 0...surface, 1...bottom mixlay, 2...mean over mixlay +real(kind=WP) :: K_GM_resscalorder = 2.0 +real(kind=WP) :: K_GM_rampmax = 40.0 ! Resol >K_GM_rampmax[km] GM full +real(kind=WP) :: K_GM_rampmin = 30.0 ! Resol replace string by int comparison +real(KIND=WP) :: Ricr = 0.3_WP ! critical bulk Richardson Number +real(KIND=WP) :: concv = 1.6_WP ! constant for pure convection (eqn. 23) (Large 1.5-1.6; MOM default 1.8) + +logical :: hbl_diag =.false. ! writen boundary layer depth +logical :: use_global_tides=.false. ! tidal potential will be computed and used in the SSH gradient computation +! Time stepping +! real(kind=WP) :: alpha=1.0_WP, theta=1.0_WP ! implicitness for +real(kind=WP) :: alpha=1.0_WP, theta=1.0_WP ! implicitness for + ! elevation and divergence +real(kind=WP) :: epsilon=0.1_WP ! AB2 offset +! Tracers +logical :: i_vert_visc= .true. +logical :: w_split =.false. +real(kind=WP) :: w_max_cfl=1.e-5_WP + +logical :: SPP=.false. 
+ +TYPE tracer_source3d_type + integer :: locID + integer :: ID + integer, allocatable, dimension(:) :: ind2 +END TYPE tracer_source3d_type + +type(tracer_source3d_type), & + allocatable, dimension(:) :: ptracers_restore +integer :: ptracers_restore_total=0 + + +! Momentum +logical :: free_slip=.false. + ! false=no slip +integer :: mom_adv=2 + ! 1 vector control volumes, p1 velocities + ! 2 scalar control volumes + ! 3 vector invariant + +logical :: open_b=.false. ! Reserved + +!_______________________________________________________________________________ +!--> mixing enhancement than can be applied via subroutine mo_convect(mesh) +! additionally to every mixing scheme i.e. KPP, PP, cvmix_KPP, cvmix_PP, cvmix_TKE + +! Switch for Monin-Obukov TB04 mixing --> can be additionally applied for all mixing schemes +! --> definetely recommented for KPP +logical :: use_momix = .true. !.false. !Monin-Obukhov -> TB04 mixing on/off +real(kind=WP) :: momix_lat = -50.0_WP ! latitudinal treshhold to apply mo_on enhanced +! convection +logical :: use_instabmix = .true. +real(kind=WP) :: instabmix_kv = 0.1 + +! Switch for enhanced wind mixing --> nasty trick from pp mixing in FESOM1.4 +logical :: use_windmix = .false. +real(kind=WP) :: windmix_kv = 1.e-3 +integer :: windmix_nl = 2 + +!_______________________________________________________________________________ +! use non-constant reference density if .false. density_ref=density_0 +logical :: use_density_ref = .false. +real(kind=WP) :: density_ref_T = 2.0_WP +real(kind=WP) :: density_ref_S = 34.0_WP + +!_______________________________________________________________________________ +! use k-profile nonlocal fluxes +logical :: use_kpp_nonlclflx = .false. + +!_______________________________________________________________________________ +! *** active tracer cutoff +logical :: limit_salinity=.true. 
!set an allowed range for salinity +real(kind=WP) :: salinity_min=5.0 !minimal salinity +real(kind=WP) :: coeff_limit_salinity=0.0023 !m/s, coefficient to restore s to s_min + + namelist /tracer_cutoff/ limit_salinity, salinity_min, coeff_limit_salinity + +! *** others *** + real(kind=WP) :: time_sum=0.0 ! for runtime estimate + +!___________________________________________ +! Pressure Gradient Force calculation (pgf) +! calculation of pgf either: +! only linfs: +! > 'nemo' ... like NEMO (interpolate to elemental depth, inter-/extrapolation) +! linfs, zlevel, zstar: +! > 'shchepetkin' ... based on density jacobian +! > 'cubicspline' ... like in FESOM1.4 +! > 'easypgf' ... interpolate pressure on elemental depth +character(20) :: which_pgf='shchepetkin' + + + NAMELIST /oce_dyn/ state_equation, C_d, A_ver, gamma0, gamma1, gamma2, Leith_c, Div_c, easy_bs_return, & + scale_area, mom_adv, free_slip, i_vert_visc, w_split, w_max_cfl, SPP,& + Fer_GM, K_GM_max, K_GM_min, K_GM_bvref, K_GM_resscalorder, K_GM_rampmax, K_GM_rampmin, & + scaling_Ferreira, scaling_Rossby, scaling_resolution, scaling_FESOM14, & + Redi, visc_sh_limit, mix_scheme, Ricr, concv, which_pgf, visc_option, alpha, theta, use_density_ref, & + K_back, c_back, uke_scaling, uke_scaling_factor, smooth_back, smooth_dis, & + smooth_back_tend, rosb_dis + + NAMELIST /tracer_phys/ diff_sh_limit, Kv0_const, double_diffusion, K_ver, K_hor, surf_relax_T, surf_relax_S, & + balance_salt_water, clim_relax, ref_sss_local, ref_sss, & + use_momix, momix_lat, momix_kv, & + use_instabmix, instabmix_kv, & + use_windmix, windmix_kv, windmix_nl, & + use_kpp_nonlclflx + +END MODULE o_PARAM +!========================================================== +MODULE o_ARRAYS +USE o_PARAM +IMPLICIT NONE +! 
Arrays are described in subroutine array_setup +real(kind=WP), allocatable, target :: Wvel(:,:), Wvel_e(:,:), Wvel_i(:,:) +real(kind=WP), allocatable :: UV(:,:,:) +real(kind=WP), allocatable :: UV_rhs(:,:,:), UV_rhsAB(:,:,:) +real(kind=WP), allocatable :: uke(:,:), v_back(:,:), uke_back(:,:), uke_dis(:,:), uke_dif(:,:) +real(kind=WP), allocatable :: uke_rhs(:,:), uke_rhs_old(:,:) +real(kind=WP), allocatable :: UV_dis_tend(:,:,:), UV_back_tend(:,:,:), UV_total_tend(:,:,:), UV_dis_tend_node(:,:,:) +real(kind=WP), allocatable :: UV_dis_posdef_b2(:,:), UV_dis_posdef(:,:), UV_back_posdef(:,:) +real(kind=WP), allocatable :: eta_n(:), d_eta(:) +real(kind=WP), allocatable :: ssh_rhs(:), hpressure(:,:) +real(kind=WP), allocatable :: CFL_z(:,:) +real(kind=WP), allocatable :: stress_surf(:,:) +real(kind=WP), allocatable :: stress_node_surf(:,:) +REAL(kind=WP), ALLOCATABLE :: stress_atmoce_x(:) +REAL(kind=WP), ALLOCATABLE :: stress_atmoce_y(:) +real(kind=WP), allocatable :: heat_flux(:), Tsurf(:) +real(kind=WP), allocatable :: heat_flux_in(:) !to keep the unmodified (by SW penetration etc.) heat flux +real(kind=WP), allocatable :: water_flux(:), Ssurf(:) +real(kind=WP), allocatable :: virtual_salt(:), relax_salt(:) +real(kind=WP), allocatable :: Tclim(:,:), Sclim(:,:) +real(kind=WP), allocatable :: Visc(:,:) +real(kind=WP), allocatable :: Tsurf_t(:,:), Ssurf_t(:,:) +real(kind=WP), allocatable :: tau_x_t(:,:), tau_y_t(:,:) +real(kind=WP), allocatable :: heat_flux_t(:,:), heat_rel_t(:,:), heat_rel(:) +real(kind=WP), allocatable :: coriolis(:), coriolis_node(:) +real(kind=WP), allocatable :: relax2clim(:) +real(kind=WP), allocatable :: MLD1(:), MLD2(:) +integer, allocatable :: MLD1_ind(:), MLD2_ind(:) +real(kind=WP), allocatable :: ssh_gp(:) +!Tracer gradients&RHS +real(kind=WP), allocatable :: ttrhs(:,:) +real(kind=WP), allocatable :: tr_xy(:,:,:) +real(kind=WP), allocatable :: tr_z(:,:) + +! 
Auxiliary arrays for vector-invariant form of momentum advection +real(kind=WP), allocatable,dimension(:,:) :: vorticity + +!Viscosity and diff coefs +real(kind=WP), allocatable,dimension(:,:) :: Av,Kv +real(kind=WP), allocatable,dimension(:,:,:) :: Kv_double +real(kind=WP), allocatable,dimension(:) :: Kv0 +!Velocities interpolated to nodes +real(kind=WP), allocatable,dimension(:,:,:) :: Unode + +! Auxiliary arrays to store Redi-GM fields +real(kind=WP), allocatable,dimension(:,:,:) :: neutral_slope +real(kind=WP), allocatable,dimension(:,:,:) :: slope_tapered +real(kind=WP), allocatable,dimension(:,:,:) :: sigma_xy +real(kind=WP), allocatable,dimension(:,:) :: sw_beta, sw_alpha +real(kind=WP), allocatable,dimension(:) :: dens_flux +!real(kind=WP), allocatable,dimension(:,:,:) :: tsh, tsv, tsh_nodes +!real(kind=WP), allocatable,dimension(:,:) :: hd_flux,vd_flux +!Isoneutral diffusivities (or xy diffusivities if Redi=.false) +real(kind=WP), allocatable :: Ki(:,:) + +! --> auxiliary array to store an intermediate part of the rhs computations. +real(kind=WP), allocatable,dimension(:) :: ssh_rhs_old !, ssh_rhs_old2 !PS +real(kind=WP) :: is_nonlinfs + +!_______________________________________________________________________________ +! Arrays added for pressure gradient force calculation +real(kind=WP), allocatable,dimension(:,:) :: density_m_rho0 +real(kind=WP), allocatable,dimension(:,:) :: density_m_rho0_slev +real(kind=WP), allocatable,dimension(:,:) :: density_ref +real(kind=WP), allocatable,dimension(:,:) :: density_dmoc +real(kind=WP), allocatable,dimension(:,:) :: pgf_x, pgf_y + +!_______________________________________________________________________________ +!!PS ! 
dummy arrays +real(kind=WP), allocatable,dimension(:,:) :: dum_3d_n !, dum_3d_e +!!PS real(kind=WP), allocatable,dimension(:) :: dum_2d_n, dum_2d_e + +!_______________________________________________________________________________ +!Monin-Obukhov correction +real(kind=WP),allocatable :: mo(:,:),mixlength(:) +!GM_stuff +real(kind=WP),allocatable :: bvfreq(:,:),mixlay_dep(:),bv_ref(:) + +real(kind=WP), allocatable :: fer_UV(:,:,:), fer_wvel(:,:) +real(kind=WP), target, allocatable :: fer_c(:), fer_scal(:), fer_K(:,:), fer_gamma(:,:,:) + +real(kind=WP), allocatable :: ice_rejected_salt(:) +END MODULE o_ARRAYS +!========================================================== diff --git a/src/toy_channel_soufflet.F90 b/src/toy_channel_soufflet.F90 index 06e1d60ac..164f015d4 100644 --- a/src/toy_channel_soufflet.F90 +++ b/src/toy_channel_soufflet.F90 @@ -1,9 +1,13 @@ MODULE Toy_Channel_Soufflet - use mod_mesh + USE MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_TRACER + USE MOD_DYN USE o_ARRAYS USE o_PARAM - USE g_PARSUP USE g_config + use g_comm_auto implicit none SAVE @@ -41,13 +45,22 @@ MODULE Toy_Channel_Soufflet ! !-------------------------------------------------------------------------------------------- ! -subroutine relax_zonal_vel(mesh) +subroutine relax_zonal_vel(dynamics, partit, mesh) implicit none integer :: elem, nz, nn, nn1 real(kind=WP) :: a, yy, uzon - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" - + + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + real(kind=WP), dimension(:,:,:), pointer :: UV_rhs + +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV_rhs=>dynamics%uv_rhs(:,:,:) + DO elem=1, myDim_elem2D ! ======== ! 
Interpolation @@ -70,16 +83,22 @@ subroutine relax_zonal_vel(mesh) UV_rhs(1,nz,elem) = UV_rhs(1,nz,elem)+dt*tau_inv*(Uclim(nz,elem)-Uzon) END DO END DO + call exchange_elem(UV_rhs, partit) end subroutine relax_zonal_vel !========================================================================== -subroutine relax_zonal_temp(mesh) +subroutine relax_zonal_temp(tdata, partit, mesh) implicit none - integer :: n, nz, nn, nn1 - real(kind=WP) :: yy, a, Tzon - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + integer :: n, nz, nn, nn1 + real(kind=WP) :: yy, a, Tzon + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer_data), intent(inout), target :: tdata +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" - do n=1, myDim_nod2D + do n=1, myDim_nod2D+eDim_nod2D yy=coord_nod2D(2,n)-lat0 a=0 if (yy dynamics%uv(:,:,:) ztem=0. zvel=0. DO elem=1,myDim_elem2D if(elem2D_nodes(1,elem)>myDim_nod2D) cycle Do nz=1,nlevels(elem)-1 - ztem(nz,bpos(elem))=ztem(nz,bpos(elem))+sum(tr_arr(nz,elem2D_nodes(:,elem),1))/3.0_8 + ztem(nz,bpos(elem))=ztem(nz,bpos(elem))+sum(tracers%data(1)%values(nz,elem2D_nodes(:,elem)))/3.0_8 zvel(nz,bpos(elem))=zvel(nz,bpos(elem))+UV(1,nz,elem) END DO END DO @@ -214,26 +244,35 @@ subroutine compute_zonal_mean(mesh) end subroutine compute_zonal_mean ! ==================================================================================== -subroutine initial_state_soufflet(mesh) +subroutine initial_state_soufflet(dynamics, tracers, partit, mesh) ! Profiles Soufflet 2016 (OM) implicit none - type(t_mesh), intent(in) , target :: mesh + type(t_mesh) , intent(inout), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(inout), target :: tracers + type(t_dyn) , intent(inout), target :: dynamics + integer :: n, nz, elnodes(3) real(kind=8) :: dst, yn, Fy, Lx ! 
real(kind=8) :: Ljet,rhomax,Sb, drho_No, drho_So ! real(kind=8) :: z_No, z_So,dz_No,dz_So, drhosurf_No, drhosurf_So, zsurf real(kind=8) :: d_No(mesh%nl-1), d_So(mesh%nl-1), rho_No(mesh%nl-1), rho_So(mesh%nl-1) -#include "associate_mesh.h" + real(kind=WP), dimension(:,:,:), pointer :: UV +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) dy=ysize/nybins/r_earth ! Default values - stress_surf = 0.0 - heat_flux = 0.0_8 - tr_arr(:,:,2) = 35.0_8 - Ssurf = tr_arr(1,:,1) - water_flux = 0.0_8 - relax2clim = 0.0_8 + stress_surf = 0.0_WP + heat_flux = 0.0_WP + tracers%data(2)%values = 35.0_WP + Ssurf = tracers%data(2)%values(1,:) + water_flux = 0.0_WP + relax2clim = 0.0_WP ! Have to set density_0=1028._WP in oce_modules.F90 ! ======== @@ -276,21 +315,21 @@ subroutine initial_state_soufflet(mesh) end if end if do nz=1, nlevels_nod2D(n)-1 - tr_arr(nz, n,1)=rho_So(nz)+(rho_No(nz)-rho_So(nz))*(1.0-Fy) + tracers%data(1)%values(nz,n)=rho_So(nz)+(rho_No(nz)-rho_So(nz))*(1.0-Fy) end do end do ! ======== ! Make consistent ! ======== - Tsurf=tr_arr(1,:,1) - Tclim=tr_arr(:,:,1) + Tsurf=tracers%data(1)%values(1,:) + Tclim=tracers%data(1)%values(:,:) ! ======== ! add small perturbation: do n=1, myDim_nod2D+eDim_nod2D dst=(coord_nod2D(2, n)-lat0)*r_earth do nz=1, nlevels_nod2D(n)-1 - tr_arr(nz,n,1)=tr_arr(nz,n,1)-0.1*sin(2*pi*dst/ysize)*exp(2*Z(nz)/zsize) & + tracers%data(1)%values(nz,n)=tracers%data(1)%values(nz,n)-0.1*sin(2*pi*dst/ysize)*exp(2*Z(nz)/zsize) & *(sin(8*pi*coord_nod2D(1,n)*r_earth/xsize)+ & 0.5*sin(3*pi*coord_nod2D(1,n)*r_earth/xsize)) end do @@ -298,41 +337,54 @@ subroutine initial_state_soufflet(mesh) ! ======= ! Compute geostrophically balanced flow ! ======= - write(*,*) mype, 'T', maxval(tr_arr(:,:,1)), minval(tr_arr(:,:,1)) + write(*,*) mype, 'T', maxval(tracers%data(1)%values), minval(tracers%data(1)%values) ! 
Redefine Coriolis (to agree with the Soufflet paper) DO n=1,myDim_elem2D elnodes=elem2D_nodes(:,n) dst=(sum(coord_nod2D(2, elnodes))/3.0-lat0)*r_earth-ysize/2 - coriolis(n)=1.0e-4+dst*1.6e-11 + mesh%coriolis(n)=1.0e-4+dst*1.6e-11 END DO - write(*,*) mype, 'COR', maxval(coriolis*10000.0), minval(coriolis*10000.0) + write(*,*) mype, 'COR', maxval(mesh%coriolis*10000.0), minval(mesh%coriolis*10000.0) DO n=1,myDim_elem2D - elnodes=elem2D_nodes(:,n) - ! Thermal wind \partial_z UV(1,:,:)=(g/rho_0/f)\partial_y rho - DO nz=1,nlevels(n)-1 - d_No(nz)=(-(0.00025_WP*density_0)*g/density_0/coriolis(n))*sum(gradient_sca(4:6,n)*Tclim(nz, elnodes)) - ! d_N is used here as a placeholder - ! -(sw_alpha*density_0) here is from the equation of state d\rho=-(sw_alpha*density_0) dT - END DO - ! Vertical integration - nz=nlevels(n)-1 - UV(1,nz,n)=d_No(nz)*(Z(nz)-zbar(nz+1)) - DO nz=nlevels(n)-2,1,-1 - UV(1,nz,n)=UV(1,nz+1,n)+d_No(nz+1)*(zbar(nz+1)-Z(nz+1))+d_No(nz)*(Z(nz)-zbar(nz+1)) - END DO + elnodes=elem2D_nodes(:,n) + ! Thermal wind \partial_z UV(1,:,:)=(g/rho_0/f)\partial_y rho + DO nz=1,nlevels(n)-1 + d_No(nz)=(-(0.00025_WP*density_0)*g/density_0/mesh%coriolis(n))*sum(gradient_sca(4:6,n)*Tclim(nz, elnodes)) + ! d_N is used here as a placeholder + ! -(sw_alpha*density_0) here is from the equation of state d\rho=-(sw_alpha*density_0) dT + END DO + ! Vertical integration + nz=nlevels(n)-1 + UV(1,nz,n)=d_No(nz)*(Z(nz)-zbar(nz+1)) + DO nz=nlevels(n)-2,1,-1 + UV(1,nz,n)=UV(1,nz+1,n)+d_No(nz+1)*(zbar(nz+1)-Z(nz+1))+d_No(nz)*(Z(nz)-zbar(nz+1)) + END DO END DO - allocate(Uclim(nl-1,myDim_elem2D)) - Uclim=UV(1,:,:) + call exchange_elem(UV, partit) + + allocate(Uclim(nl-1,myDim_elem2D+eDim_elem2D)) + Uclim=UV(1,:,:) write(*,*) mype, 'Vel', maxval(UV(1,:,:)), minval(UV(1,:,:)) END subroutine initial_state_soufflet ! 
=============================================================================== -subroutine energy_out_soufflet(mesh) +subroutine energy_out_soufflet(dynamics, partit, mesh) implicit none real(kind=8) :: tke(2), aux(2), ww, wwaux integer :: elem, nz, m, elnodes(3), nybins - real(kind=8), allocatable :: zvel1D(:), znum1D(:) - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" + real(kind=8), allocatable :: zvel1D(:), znum1D(:) + type(t_dyn) , intent(inout), target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_mesh) , intent(in) , target :: mesh + +real(kind=WP), dimension(:,:,:), pointer :: UV +real(kind=WP), dimension(:,:), pointer :: Wvel +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" +UV => dynamics%uv(:,:,:) +Wvel => dynamics%w(:,:) + nybins=100 zvel=0. diff --git a/src/write_step_info.F90 b/src/write_step_info.F90 index b6232a0e2..a9c5cfd78 100644 --- a/src/write_step_info.F90 +++ b/src/write_step_info.F90 @@ -1,492 +1,544 @@ -module write_step_info_interface - interface - subroutine write_step_info(istep,outfreq, mesh) - use MOD_MESH - integer :: istep,outfreq - type(t_mesh), intent(in) , target :: mesh - end subroutine - end interface -end module - -! -! 
-!=============================================================================== -subroutine write_step_info(istep,outfreq, mesh) - use g_config, only: dt, use_ice - use MOD_MESH - use o_PARAM - use g_PARSUP - use o_ARRAYS - use i_ARRAYS - use g_comm_auto - implicit none - - integer :: n, istep,outfreq - real(kind=WP) :: int_eta, int_hbar, int_wflux, int_hflux, int_temp, int_salt - real(kind=WP) :: min_eta, min_hbar, min_wflux, min_hflux, min_temp, min_salt, & - min_wvel,min_hnode,min_deta,min_wvel2,min_hnode2, & - min_vvel, min_vvel2, min_uvel, min_uvel2 - real(kind=WP) :: max_eta, max_hbar, max_wflux, max_hflux, max_temp, max_salt, & - max_wvel, max_hnode, max_deta, max_wvel2, max_hnode2, max_m_ice, & - max_vvel, max_vvel2, max_uvel, max_uvel2, & - max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av - real(kind=WP) :: int_deta , int_dhbar - real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt - type(t_mesh), intent(in) , target :: mesh -#include "associate_mesh.h" - if (mod(istep,outfreq)==0) then - - !_______________________________________________________________________ - int_eta =0. - int_hbar =0. - int_deta =0. - int_dhbar =0. - int_wflux =0. - int_hflux =0. - int_temp =0. - int_salt =0. - loc_eta =0. - loc_hbar =0. - loc_deta =0. - loc_dhbar =0. - loc_wflux =0. -!!PS loc_hflux =0. -!!PS loc_temp =0. -!!PS loc_salt =0. - loc =0. 
- !_______________________________________________________________________ - do n=1, myDim_nod2D - loc_eta = loc_eta + area(1, n)*eta_n(n) - loc_hbar = loc_hbar + area(1, n)*hbar(n) - loc_deta = loc_deta + area(1, n)*d_eta(n) - loc_dhbar = loc_dhbar + area(1, n)*(hbar(n)-hbar_old(n)) - loc_wflux = loc_wflux + area(1, n)*water_flux(n) -!!PS loc_hflux = loc_hflux + area(1, n)*heat_flux(n) -!!PS loc_temp = loc_temp + area(1, n)*sum(tr_arr(:,n,1))/(nlevels_nod2D(n)-1) -!!PS loc_salt = loc_salt + area(1, n)*sum(tr_arr(:,n,2))/(nlevels_nod2D(n)-1) - end do - - !_______________________________________________________________________ - call MPI_AllREDUCE(loc_eta , int_eta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_hbar , int_hbar , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_deta , int_deta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_dhbar, int_dhbar, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - call MPI_AllREDUCE(loc_wflux, int_wflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) -!!PS call MPI_AllREDUCE(loc_hflux, int_hflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) -!!PS call MPI_AllREDUCE(loc_temp , int_temp , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) -!!PS call MPI_AllREDUCE(loc_salt , int_salt , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) - int_eta = int_eta /ocean_area - int_hbar = int_hbar /ocean_area - int_deta = int_deta /ocean_area - int_dhbar= int_dhbar/ocean_area - int_wflux= int_wflux/ocean_area -!!PS int_hflux= int_hflux/ocean_area -!!PS int_temp = int_temp /ocean_area -!!PS int_salt = int_salt /ocean_area - - !_______________________________________________________________________ - loc = minval(eta_n(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_eta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(hbar(1:myDim_nod2D)) - call 
MPI_AllREDUCE(loc , min_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(water_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(heat_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(tr_arr(:,1:myDim_nod2D,1),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) - call MPI_AllREDUCE(loc , min_temp , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(tr_arr(:,1:myDim_nod2D,2),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) - call MPI_AllREDUCE(loc , min_salt , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Wvel(1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Wvel(2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(1,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(1,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(2,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(Unode(2,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(d_eta(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , min_deta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(hnode(1,1:myDim_nod2D),MASK=(hnode(1,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , min_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) - loc = minval(hnode(2,1:myDim_nod2D),MASK=(hnode(2,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , min_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, 
MPI_COMM_FESOM, MPIerr) - - !_______________________________________________________________________ - loc = maxval(eta_n(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_eta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(hbar(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(water_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(heat_flux(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(tr_arr(:,1:myDim_nod2D,1),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) - call MPI_AllREDUCE(loc , max_temp , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(tr_arr(:,1:myDim_nod2D,2),MASK=(tr_arr(:,1:myDim_nod2D,2)/=0.0)) - call MPI_AllREDUCE(loc , max_salt , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Wvel(1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Wvel(2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(1,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(1,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(2,1,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(Unode(2,2,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(d_eta(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_deta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = 
maxval(hnode(1,1:myDim_nod2D),MASK=(hnode(1,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , max_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(hnode(2,1:myDim_nod2D),MASK=(hnode(2,1:myDim_nod2D)/=0.0)) - call MPI_AllREDUCE(loc , max_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(CFL_z(:,1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_cfl_z , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(abs(pgf_x(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_pgfx , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(abs(pgf_y(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_pgfy , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - if (use_ice) then - loc = maxval(m_ice(1:myDim_nod2D)) - call MPI_AllREDUCE(loc , max_m_ice , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - end if - loc = maxval(abs(Av(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_av , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - loc = maxval(abs(Kv(:,1:myDim_nod2D))) - call MPI_AllREDUCE(loc , max_kv , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) - !_______________________________________________________________________ - if (mype==0) then - write(*,*) '___CHECK GLOBAL OCEAN VARIABLES --> mstep=',mstep - write(*,*) ' ___global estimat of eta & hbar____________________' - write(*,*) ' int(eta), int(hbar) =', int_eta, int_hbar - write(*,*) ' --> error(eta-hbar) =', int_eta-int_hbar - write(*,*) ' min(eta) , max(eta) =', min_eta, max_eta - write(*,*) ' max(hbar), max(hbar) =', min_hbar, max_hbar - write(*,*) - write(*,*) ' int(deta), int(dhbar) =', int_deta, int_dhbar - write(*,*) ' --> error(deta-dhbar) =', int_deta-int_dhbar - write(*,*) ' --> error(deta-wflux) =', int_deta-int_wflux - write(*,*) ' --> error(dhbar-wflux) =', int_dhbar-int_wflux - write(*,*) - write(*,*) ' -int(wflux)*dt =', int_wflux*dt*(-1.0) - write(*,*) ' int(deta 
)-int(wflux)*dt =', int_deta-int_wflux*dt*(-1.0) - write(*,*) ' int(dhbar)-int(wflux)*dt =', int_dhbar-int_wflux*dt*(-1.0) - write(*,*) - write(*,*) ' ___global min/max/mean --> mstep=',mstep,'____________' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' eta= ', min_eta ,' | ',max_eta ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' deta= ', min_deta ,' | ',max_deta ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hbar= ', min_hbar ,' | ',max_hbar ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' wflux= ', min_wflux,' | ',max_wflux,' | ',int_wflux - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' hflux= ', min_hflux,' | ',max_hflux,' | ',int_hflux - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' temp= ', min_temp ,' | ',max_temp ,' | ',int_temp - write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' salt= ', min_salt ,' | ',max_salt ,' | ',int_salt - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(1,:)= ', min_wvel ,' | ',max_wvel ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(2,:)= ', min_wvel2,' | ',max_wvel2,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(1,:)= ', min_uvel ,' | ',max_uvel ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(2,:)= ', min_uvel2,' | ',max_uvel2,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(1,:)= ', min_vvel ,' | ',max_vvel ,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(2,:)= ', min_vvel2,' | ',max_vvel2,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(1,:)= ', min_hnode,' | ',max_hnode,' | ','N.A.' - write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(2,:)= ', min_hnode2,' | ',max_hnode2,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' cfl_z= ',' N.A. ',' | ',max_cfl_z ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' pgf_x= ',' N.A. ',' | ',max_pgfx ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' pgf_y= ',' N.A. ',' | ',max_pgfy ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' Av= ',' N.A. 
',' | ',max_av ,' | ','N.A.' - write(*,"(A, A , A, ES10.3, A, A )") ' Kv= ',' N.A. ',' | ',max_kv ,' | ','N.A.' - if (use_ice) write(*,"(A, A , A, ES10.3, A, A )") ' m_ice= ',' N.A. ',' | ',max_m_ice ,' | ','N.A.' - write(*,*) - endif - endif ! --> if (mod(istep,logfile_outfreq)==0) then -end subroutine write_step_info -! -! -!=============================================================================== -subroutine check_blowup(istep, mesh) - use g_config, only: logfile_outfreq, which_ALE - use MOD_MESH - use o_PARAM - use g_PARSUP - use o_ARRAYS - use i_ARRAYS - use g_comm_auto - use io_BLOWUP - use g_forcing_arrays - use diagnostics - use write_step_info_interface - implicit none - - integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 - integer :: el, elidx - type(t_mesh), intent(in), target :: mesh -#include "associate_mesh.h" - !___________________________________________________________________________ -! ! if (mod(istep,logfile_outfreq)==0) then -! ! if (mype==0) then -! ! write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep -! ! write(*,*) -! ! endif - do n=1, myDim_nod2d - - !___________________________________________________________________ - ! check ssh - if ( ((eta_n(n) /= eta_n(n)) .or. & - eta_n(n)<-50.0 .or. eta_n(n)>50.0)) then -!!PS eta_n(n)<-10.0 .or. 
eta_n(n)>10.0)) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found eta_n become NaN or <-10.0, >10.0' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - write(*,*) 'eta_n(n) = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) - write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) - write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) - write(*,*) - write(*,*) 'ssh_rhs = ',ssh_rhs(n),', ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'hbar = ',hbar(n),', hbar_old = ',hbar_old(n) - write(*,*) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) - write(*,*) 'u_wind = ',u_wind(n),', v_wind = ',v_wind(n) - write(*,*) - do nz=1,nod_in_elem2D_num(n) - write(*,*) 'stress_surf(1:2,',nz,') = ',stress_surf(:,nod_in_elem2D(nz,n)) - end do - write(*,*) - write(*,*) 'm_ice = ',m_ice(n),', m_ice_old = ',m_ice_old(n) - write(*,*) 'a_ice = ',a_ice(n),', a_ice_old = ',a_ice_old(n) - write(*,*) 'thdgr = ',thdgr(n),', thdgr_old = ',thdgr_old(n) - write(*,*) 'thdgrsn = ',thdgrsn(n) - write(*,*) - if (lcurt_stress_surf) then - write(*,*) 'curl_stress_surf = ',curl_stress_surf(n) - write(*,*) - endif - do el=1,nod_in_elem2d_num(n) - elidx = nod_in_elem2D(el,n) - write(*,*) ' elem#=',el,', elemidx=',elidx - write(*,*) ' pgf_x =',pgf_x(:,elidx) - write(*,*) ' pgf_y =',pgf_y(:,elidx) -! write(*,*) ' U =',UV(1,:,elidx) -! write(*,*) ' V =',UV(2,:,elidx) - write(*,*) - enddo - write(*,*) 'Wvel(1, n) = ',Wvel(1,n) - write(*,*) 'Wvel(:, n) = ',Wvel(:,n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) - write(*,*) - write(*,*) 'hnode(1, n) = ',hnode(1,n) - write(*,*) 'hnode(:, n) = ',hnode(:,n) - write(*,*) - endif - - !___________________________________________________________________ - ! check surface vertical velocity --> in case of zlevel and zstar - ! 
vertical coordinate its indicator if Volume is conserved for - ! Wvel(1,n)~maschine preccision - if ( .not. trim(which_ALE)=='linfs' .and. ( Wvel(1, n) /= Wvel(1, n) .or. abs(Wvel(1,n))>1e-12 )) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found surface layer vertical velocity becomes NaN or >1e-12' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) - write(*,*) 'Wvel(1, n) = ',Wvel(1,n) - write(*,*) 'Wvel(:, n) = ',Wvel(:,n) - write(*,*) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - write(*,*) 'hnode(1, n) = ',hnode(1,n) - write(*,*) 'hnode(:, n) = ',hnode(:,n) - write(*,*) 'hflux = ',heat_flux(n) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) - write(*,*) 'eta_n = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) 'hbar = ',hbar(n) - write(*,*) 'hbar_old = ',hbar_old(n) - write(*,*) 'ssh_rhs = ',ssh_rhs(n) - write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) - write(*,*) - - end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... - - !___________________________________________________________________ - ! check surface layer thinknesss - if ( .not. trim(which_ALE)=='linfs' .and. ( hnode(1, n) /= hnode(1, n) .or. hnode(1,n)< 0 )) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found surface layer thickness becomes NaN or <0' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) - write(*,*) 'hnode(1, n) = ',hnode(1,n) - write(*,*) 'hnode(:, n) = ',hnode(:,n) - write(*,*) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... - - - do nz=1,nlevels_nod2D(n)-1 - !_______________________________________________________________ - ! check temp - if ( (tr_arr(nz, n,1) /= tr_arr(nz, n,1)) .or. 
& - tr_arr(nz, n,1) < -5.0 .or. tr_arr(nz, n,1)>60) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found temperture becomes NaN or <-5.0, >60' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'lon,lat = ',geo_coord_nod2D(:,n)/rad - write(*,*) 'nz = ',nz - write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) - write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad - write(*,*) 'z=', Z_n(nz) - write(*,*) 'temp(nz, n) = ',tr_arr(nz, n,1) - write(*,*) 'temp(: , n) = ',tr_arr(:, n,1) - write(*,*) 'temp_old(nz,n)= ',tr_arr_old(nz, n,1) - write(*,*) 'temp_old(: ,n)= ',tr_arr_old(:, n,1) - write(*,*) - write(*,*) 'hflux = ',heat_flux(n) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) - write(*,*) 'eta_n = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) 'hbar = ',hbar(n) - write(*,*) 'hbar_old = ',hbar_old(n) - write(*,*) 'ssh_rhs = ',ssh_rhs(n) - write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'm_ice = ',m_ice(n) - write(*,*) 'm_ice_old = ',m_ice_old(n) - write(*,*) 'm_snow = ',m_snow(n) - write(*,*) 'm_snow_old = ',m_snow_old(n) - write(*,*) - write(*,*) 'hnode = ',hnode(:,n) - write(*,*) 'hnode_new = ',hnode_new(:,n) - write(*,*) - write(*,*) 'Kv = ',Kv(:,n) - write(*,*) - write(*,*) 'W = ',Wvel(:,n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) - write(*,*) -! do el=1,nod_in_elem2d_num(n) -! elidx = nod_in_elem2D(el,n) -! write(*,*) ' elem#=',el,', elemidx=',elidx -! write(*,*) ' helem =',helem(:,elidx) -! write(*,*) ' U =',UV(1,:,elidx) -! write(*,*) ' V =',UV(2,:,elidx) -! enddo - write(*,*) - - endif ! --> if ( (tr_arr(nz, n,1) /= tr_arr(nz, n,1)) .or. & ... - - !_______________________________________________________________ - ! check salt - if ( (tr_arr(nz, n,2) /= tr_arr(nz, n,2)) .or. & - tr_arr(nz, n,2) < 0 .or. 
tr_arr(nz, n,2)>50 ) then - found_blowup_loc=1 - write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep - write(*,*) ' --STOP--> found salinity becomes NaN or <0, >50' - write(*,*) 'mype = ',mype - write(*,*) 'mstep = ',istep - write(*,*) 'node = ',n - write(*,*) 'nz = ',nz - write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) - write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad - write(*,*) 'z=', Z_n(nz) - write(*,*) 'salt(nz, n) = ',tr_arr(nz, n,2) - write(*,*) 'salt(: , n) = ',tr_arr(:, n,2) - write(*,*) - write(*,*) 'temp(nz, n) = ',tr_arr(nz, n,1) - write(*,*) 'temp(: , n) = ',tr_arr(:, n,1) - write(*,*) - write(*,*) 'hflux = ',heat_flux(n) - write(*,*) - write(*,*) 'wflux = ',water_flux(n) - write(*,*) 'eta_n = ',eta_n(n) - write(*,*) 'd_eta(n) = ',d_eta(n) - write(*,*) 'hbar = ',hbar(n) - write(*,*) 'hbar_old = ',hbar_old(n) - write(*,*) 'ssh_rhs = ',ssh_rhs(n) - write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) - write(*,*) - write(*,*) 'hnode = ',hnode(:,n) - write(*,*) 'hnode_new = ',hnode_new(:,n) - write(*,*) - write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) - write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) - write(*,*) - write(*,*) 'Kv = ',Kv(:,n) - write(*,*) - do el=1,nod_in_elem2d_num(n) - elidx = nod_in_elem2D(el,n) - write(*,*) ' elem#=',el,', elemidx=',elidx - write(*,*) ' Av =',Av(:,elidx) -! write(*,*) ' helem =',helem(:,elidx) -! write(*,*) ' U =',UV(1,:,elidx) -! write(*,*) ' V =',UV(2,:,elidx) - enddo - write(*,*) 'Wvel = ',Wvel(:,n) - write(*,*) - write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) - write(*,*) - write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad - write(*,*) - endif ! --> if ( (tr_arr(nz, n,2) /= tr_arr(nz, n,2)) .or. & ... - end do ! --> do nz=1,nlevels_nod2D(n)-1 - end do ! --> do n=1, myDim_nod2d -! ! end if - - !_______________________________________________________________________ - ! check globally if one of the cpus hat a blowup situation. if its the - ! case CPU mype==0 needs to write out the stuff. 
Write out occurs in - ! moment only over CPU mype==0 - call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) - if (found_blowup==1) then - call write_step_info(istep,1,mesh) - if (mype==0) then - call sleep(1) - write(*,*) - write(*,*) ' MODEL BLOW UP !!!' - write(*,*) ' ____' - write(*,*) ' __,-~~/~ `---.' - write(*,*) ' _/_,---( , )' - write(*,*) ' __ / < / ) \___' - write(*,*) '- -- ----===;;;`====------------------===;;;===---- -- -' - write(*,*) ' \/ ~"~"~"~"~"~\~"~)~"/' - write(*,*) ' (_ ( \ ( > \)' - write(*,*) ' \_( _ < >_>`' - write(*,*) ' ~ `-i` ::>|--"' - write(*,*) ' I;|.|.|' - write(*,*) ' <|i::|i|`' - write(*,*) ' (` ^`"`- ")' - write(*,*) ' _____.,-#%&$@%#&#~,._____' - write(*,*) - end if - call blowup(istep, mesh) - if (mype==0) write(*,*) ' --> finished writing blow up file' - call par_ex - endif -end subroutine + +module write_step_info_interface + interface + subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_DYN + use MOD_ICE + integer :: istep,outfreq + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice + end subroutine + end interface +end module +module check_blowup_interface + interface + subroutine check_blowup(istep, ice, dynamics, tracers, partit, mesh) + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_DYN + use MOD_ICE + integer :: istep + type(t_mesh), intent(in), target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in), target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice + end subroutine + end interface +end module +! +! 
+!=============================================================================== +subroutine write_step_info(istep, outfreq, ice, dynamics, tracers, partit, mesh) + use g_config, only: dt, use_ice + use MOD_MESH + USE MOD_PARTIT + USE MOD_PARSUP + use MOD_TRACER + use MOD_DYN + use MOD_ICE + use o_PARAM + use o_ARRAYS, only: water_flux, heat_flux, & + pgf_x, pgf_y, Av, Kv + use g_comm_auto + use g_support + implicit none + + integer :: n, istep,outfreq + real(kind=WP) :: int_eta, int_hbar, int_wflux, int_hflux, int_temp, int_salt + real(kind=WP) :: min_eta, min_hbar, min_wflux, min_hflux, min_temp, min_salt, & + min_wvel,min_hnode,min_deta,min_wvel2,min_hnode2, & + min_vvel, min_vvel2, min_uvel, min_uvel2 + real(kind=WP) :: max_eta, max_hbar, max_wflux, max_hflux, max_temp, max_salt, & + max_wvel, max_hnode, max_deta, max_wvel2, max_hnode2, max_m_ice, & + max_vvel, max_vvel2, max_uvel, max_uvel2, & + max_cfl_z, max_pgfx, max_pgfy, max_kv, max_av + real(kind=WP) :: int_deta , int_dhbar + real(kind=WP) :: loc, loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux,loc_hflux, loc_temp, loc_salt + type(t_mesh), intent(in) , target :: mesh + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_dyn) , intent(in) , target :: dynamics + type(t_ice) , intent(in) , target :: ice + real(kind=WP), dimension(:,:,:), pointer :: UV, UVnode + real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta, m_ice +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + UVnode => dynamics%uvnode(:,:,:) + Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) + m_ice => ice%data(2)%values(:) + if (mod(istep,outfreq)==0) then + + !_______________________________________________________________________ + int_eta =0. + int_hbar =0. 
+ int_deta =0. + int_dhbar =0. + int_wflux =0. + int_hflux =0. + int_temp =0. + int_salt =0. + loc_eta =0. + loc_hbar =0. + loc_deta =0. + loc_dhbar =0. + loc_wflux =0. + loc =0. + !_______________________________________________________________________ +#if !defined(__openmp_reproducible) +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n) REDUCTION(+:loc_eta, loc_hbar, loc_deta, loc_dhbar, loc_wflux) +#endif + do n=1, myDim_nod2D + loc_eta = loc_eta + areasvol(ulevels_nod2D(n), n)*eta_n(n) + loc_hbar = loc_hbar + areasvol(ulevels_nod2D(n), n)*hbar(n) + loc_deta = loc_deta + areasvol(ulevels_nod2D(n), n)*d_eta(n) + loc_dhbar = loc_dhbar + areasvol(ulevels_nod2D(n), n)*(hbar(n)-hbar_old(n)) + loc_wflux = loc_wflux + areasvol(ulevels_nod2D(n), n)*water_flux(n) + end do +#if !defined(__openmp_reproducible) +!$OMP END PARALLEL DO +#endif + !_______________________________________________________________________ + call MPI_AllREDUCE(loc_eta , int_eta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_hbar , int_hbar , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_deta , int_deta , 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_dhbar, int_dhbar, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + call MPI_AllREDUCE(loc_wflux, int_wflux, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_FESOM, MPIerr) + + int_eta = int_eta /ocean_areawithcav + int_hbar = int_hbar /ocean_areawithcav + int_deta = int_deta /ocean_areawithcav + int_dhbar= int_dhbar/ocean_areawithcav + int_wflux= int_wflux/ocean_areawithcav + !_______________________________________________________________________ + loc=omp_min_max_sum1(eta_n, 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_eta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hbar, 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MIN, 
MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(water_flux, 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(heat_flux, 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(tracers%data(1)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0_WP) + call MPI_AllREDUCE(loc , min_temp , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(tracers%data(2)%values, 1, nl-1, 1, myDim_nod2D, 'min', partit, 0.0_WP) + call MPI_AllREDUCE(loc , min_salt , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(Wvel(1,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(Wvel(2,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(1,1,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(1,2,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_uvel2, 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(2,1,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(2,2,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(d_eta, 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_deta , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hnode(1,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_hnode , 1, MPI_DOUBLE_PRECISION, 
MPI_MIN, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hnode(2,:), 1, myDim_nod2D, 'min', partit) + call MPI_AllREDUCE(loc , min_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MIN, MPI_COMM_FESOM, MPIerr) + + !_______________________________________________________________________ + loc=omp_min_max_sum1(eta_n, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_eta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hbar, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_hbar , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(water_flux, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_wflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(heat_flux, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_hflux, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(tracers%data(1)%values, 1, nl-1, 1, myDim_nod2D, 'max', partit, 0.0_WP) + call MPI_AllREDUCE(loc , max_temp , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(tracers%data(2)%values, 1, nl-1, 1, myDim_nod2D, 'max', partit, 0.0_WP) + call MPI_AllREDUCE(loc , max_salt , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(Wvel(1,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_wvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(Wvel(2,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_wvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(1,1,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_uvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(1,2,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_uvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(2,1,:), 1, myDim_nod2D, 
'max', partit) + call MPI_AllREDUCE(loc , max_vvel , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(UVnode(2,2,:), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_vvel2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(d_eta, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_deta , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hnode(1, :), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_hnode , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum1(hnode(2, :), 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_hnode2 , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(CFL_z, 1, nl-1, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_cfl_z , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(pgf_x, 1, nl-1, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_pgfx , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(pgf_y, 1, nl-1, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_pgfy , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + if (use_ice) then + loc=omp_min_max_sum1(m_ice, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_m_ice , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + end if + loc=omp_min_max_sum2(Av, 1, nl, 1, myDim_elem2D, 'max', partit) + call MPI_AllREDUCE(loc , max_av , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + loc=omp_min_max_sum2(Av, 1, nl, 1, myDim_nod2D, 'max', partit) + call MPI_AllREDUCE(loc , max_kv , 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_FESOM, MPIerr) + !_______________________________________________________________________ + if (mype==0) then + write(*,*) '___CHECK GLOBAL OCEAN VARIABLES --> mstep=',mstep + write(*,*) ' ___global estimat of eta & hbar____________________' + 
write(*,*) '  int(eta), int(hbar)     =', int_eta, int_hbar +            write(*,*) '  --> error(eta-hbar)     =', int_eta-int_hbar +            write(*,*) '  min(eta) , max(eta)     =', min_eta, max_eta +            write(*,*) '  min(hbar), max(hbar)    =', min_hbar, max_hbar +            write(*,*) +            write(*,*) '  int(deta), int(dhbar)   =', int_deta, int_dhbar +            write(*,*) '  --> error(deta-dhbar)   =', int_deta-int_dhbar +            write(*,*) '  --> error(deta-wflux)   =', int_deta-int_wflux +            write(*,*) '  --> error(dhbar-wflux)  =', int_dhbar-int_wflux +            write(*,*) +            write(*,*) ' -int(wflux)*dt           =', int_wflux*dt*(-1.0) +            write(*,*) '  int(deta )-int(wflux)*dt =', int_deta-int_wflux*dt*(-1.0) +            write(*,*) '  int(dhbar)-int(wflux)*dt =', int_dhbar-int_wflux*dt*(-1.0) +            write(*,*) +            write(*,*) ' ___global min/max/mean --> mstep=',mstep,'____________' +            write(*,"(A, ES10.3, A, ES10.3, A, A )") '   eta= ', min_eta ,' | ',max_eta ,' | ','N.A.' +            write(*,"(A, ES10.3, A, ES10.3, A, A )") '  deta= ', min_deta ,' | ',max_deta ,' | ','N.A.' +            write(*,"(A, ES10.3, A, ES10.3, A, A )") '  hbar= ', min_hbar ,' | ',max_hbar ,' | ','N.A.' +            write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' wflux= ', min_wflux,' | ',max_wflux,' | ',int_wflux +            write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") ' hflux= ', min_hflux,' | ',max_hflux,' | ',int_hflux +            write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") '  temp= ', min_temp ,' | ',max_temp ,' | ',int_temp +            write(*,"(A, ES10.3, A, ES10.3, A, ES10.3)") '  salt= ', min_salt ,' | ',max_salt ,' | ',int_salt +            write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(1,:)= ', min_wvel ,' | ',max_wvel ,' | ','N.A.' +            write(*,"(A, ES10.3, A, ES10.3, A, A )") ' wvel(2,:)= ', min_wvel2,' | ',max_wvel2,' | ','N.A.' +            write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(1,:)= ', min_uvel ,' | ',max_uvel ,' | ','N.A.' +            write(*,"(A, ES10.3, A, ES10.3, A, A )") ' uvel(2,:)= ', min_uvel2,' | ',max_uvel2,' | ','N.A.' +            write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(1,:)= ', min_vvel ,' | ',max_vvel ,' | ','N.A.' 
+ write(*,"(A, ES10.3, A, ES10.3, A, A )") ' vvel(2,:)= ', min_vvel2,' | ',max_vvel2,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(1,:)= ', min_hnode,' | ',max_hnode,' | ','N.A.' + write(*,"(A, ES10.3, A, ES10.3, A, A )") ' hnode(2,:)= ', min_hnode2,' | ',max_hnode2,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' cfl_z= ',' N.A. ',' | ',max_cfl_z ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' pgf_x= ',' N.A. ',' | ',max_pgfx ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' pgf_y= ',' N.A. ',' | ',max_pgfy ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' Av= ',' N.A. ',' | ',max_av ,' | ','N.A.' + write(*,"(A, A , A, ES10.3, A, A )") ' Kv= ',' N.A. ',' | ',max_kv ,' | ','N.A.' + if (use_ice) then + write(*,"(A, A , A, ES10.3, A, A)") ' m_ice= ',' N.A. ',' | ',max_m_ice ,' | ','N.A.' + end if + end if + endif ! --> if (mod(istep,logfile_outfreq)==0) then +end subroutine write_step_info +! +! +!=============================================================================== +subroutine check_blowup(istep, ice, dynamics, tracers, partit, mesh) + USE MOD_ICE + USE MOD_DYN + USE MOD_TRACER + USE MOD_PARTIT + USE MOD_PARSUP + USE MOD_MESH + use g_config, only: logfile_outfreq, which_ALE + use o_PARAM + use o_ARRAYS, only: water_flux, stress_surf, & + heat_flux, Kv, Av + use g_comm_auto + use io_BLOWUP + use g_forcing_arrays + use diagnostics + use write_step_info_interface + implicit none + + type(t_ice) , intent(in) , target :: ice + type(t_dyn) , intent(in) , target :: dynamics + type(t_partit), intent(inout), target :: partit + type(t_tracer), intent(in) , target :: tracers + type(t_mesh) , intent(in) , target :: mesh + !___________________________________________________________________________ + integer :: n, nz, istep, found_blowup_loc=0, found_blowup=0 + integer :: el, elidx + !___________________________________________________________________________ + ! 
pointer on necessary derived types + real(kind=WP), dimension(:,:,:), pointer :: UV + real(kind=WP), dimension(:,:) , pointer :: Wvel, CFL_z + real(kind=WP), dimension(:) , pointer :: ssh_rhs, ssh_rhs_old + real(kind=WP), dimension(:) , pointer :: eta_n, d_eta + real(kind=WP), dimension(:) , pointer :: u_ice, v_ice + real(kind=WP), dimension(:) , pointer :: a_ice, m_ice, m_snow + real(kind=WP), dimension(:) , pointer :: a_ice_old, m_ice_old, m_snow_old +#include "associate_part_def.h" +#include "associate_mesh_def.h" +#include "associate_part_ass.h" +#include "associate_mesh_ass.h" + UV => dynamics%uv(:,:,:) + Wvel => dynamics%w(:,:) + CFL_z => dynamics%cfl_z(:,:) + ssh_rhs => dynamics%ssh_rhs(:) + ssh_rhs_old => dynamics%ssh_rhs_old(:) + eta_n => dynamics%eta_n(:) + d_eta => dynamics%d_eta(:) + u_ice => ice%uice(:) + v_ice => ice%vice(:) + a_ice => ice%data(1)%values(:) + m_ice => ice%data(2)%values(:) + m_snow => ice%data(3)%values(:) + a_ice_old => ice%data(1)%values_old(:) + m_ice_old => ice%data(2)%values_old(:) + m_snow_old => ice%data(3)%values_old(:) + + !___________________________________________________________________________ +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(n, nz) + do n=1, myDim_nod2d + !___________________________________________________________________ + ! check ssh + if ( ((eta_n(n) /= eta_n(n)) .or. eta_n(n)<-50.0 .or. eta_n(n)>50.0 .or. 
(d_eta(n) /= d_eta(n)) ) ) then +!$OMP CRITICAL + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found eta_n becomes NaN or <-50.0, >50.0' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) + write(*,*) 'eta_n(n) = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) + write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) + write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) + write(*,*) + write(*,*) 'ssh_rhs = ',ssh_rhs(n),', ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'hbar = ',hbar(n),', hbar_old = ',hbar_old(n) + write(*,*) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) + write(*,*) 'u_wind = ',u_wind(n),', v_wind = ',v_wind(n) + write(*,*) + do nz=1,nod_in_elem2D_num(n) + write(*,*) 'stress_surf(1:2,',nz,') = ',stress_surf(:,nod_in_elem2D(nz,n)) + end do + write(*,*) + write(*,*) 'm_ice = ',m_ice(n),', m_ice_old = ',m_ice_old(n) + write(*,*) 'a_ice = ',a_ice(n),', a_ice_old = ',a_ice_old(n) + write(*,*) + write(*,*) 'Wvel(:, n) = ',Wvel(ulevels_nod2D(n):nlevels_nod2D(n),n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(ulevels_nod2D(n):nlevels_nod2D(n),n) + write(*,*) + write(*,*) 'hnode(:, n) = ',hnode(ulevels_nod2D(n):nlevels_nod2D(n),n) + write(*,*) +!$OMP END CRITICAL + endif + + !___________________________________________________________________ + ! check surface vertical velocity --> in case of zlevel and zstar + ! vertical coordinate its indicator if Volume is conserved for + ! Wvel(1,n)~machine precision + if ( .not. trim(which_ALE)=='linfs' .and. 
( Wvel(1, n) /= Wvel(1, n) )) then +!$OMP CRITICAL + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found surface layer vertical velocity becomes NaN' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'uln, nln = ',ulevels_nod2D(n), nlevels_nod2D(n) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) + write(*,*) 'Wvel(1, n) = ',Wvel(1,n) + write(*,*) 'Wvel(:, n) = ',Wvel(:,n) + write(*,*) + write(*,*) 'hnode(1, n) = ',hnode(1,n) + write(*,*) 'hnode(:, n) = ',hnode(:,n) + write(*,*) 'hflux = ',heat_flux(n) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) + write(*,*) 'eta_n = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) 'hbar = ',hbar(n) + write(*,*) 'hbar_old = ',hbar_old(n) + write(*,*) 'ssh_rhs = ',ssh_rhs(n) + write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) + write(*,*) +!$OMP END CRITICAL + end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... + + !___________________________________________________________________ + ! check surface layer thickness + if ( .not. trim(which_ALE)=='linfs' .and. ( hnode(1, n) /= hnode(1, n) .or. hnode(1,n)< 0 )) then +!$OMP CRITICAL + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found surface layer thickness becomes NaN or <0' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) + write(*,*) 'hnode(1, n) = ',hnode(1,n) + write(*,*) 'hnode(:, n) = ',hnode(:,n) + write(*,*) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) +!$OMP END CRITICAL + end if ! --> if ( .not. trim(which_ALE)=='linfs' .and. ... + + + do nz=1,nlevels_nod2D(n)-1 + !_______________________________________________________________ + ! check temp + if ( (tracers%data(1)%values(nz, n) /= tracers%data(1)%values(nz, n)) .or. 
& + tracers%data(1)%values(nz, n) < -5.0 .or. tracers%data(1)%values(nz, n)>60) then +!$OMP CRITICAL + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found temperture becomes NaN or <-5.0, >60' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'lon,lat = ',geo_coord_nod2D(:,n)/rad + write(*,*) 'nz = ',nz + write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) + write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad + write(*,*) 'temp(nz, n) = ',tracers%data(1)%values(nz, n) + write(*,*) 'temp(: , n) = ',tracers%data(1)%values(:, n) + write(*,*) 'temp_old(nz,n)= ',tracers%data(1)%valuesAB(nz, n) + write(*,*) 'temp_old(: ,n)= ',tracers%data(1)%valuesAB(:, n) + write(*,*) + write(*,*) 'hflux = ',heat_flux(n) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) + write(*,*) 'eta_n = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) 'hbar = ',hbar(n) + write(*,*) 'hbar_old = ',hbar_old(n) + write(*,*) 'ssh_rhs = ',ssh_rhs(n) + write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'm_ice = ',m_ice(n) + write(*,*) 'm_ice_old = ',m_ice_old(n) + write(*,*) 'm_snow = ',m_snow(n) + write(*,*) 'm_snow_old = ',m_snow_old(n) + write(*,*) + write(*,*) 'hnode = ',hnode(:,n) + write(*,*) 'hnode_new = ',hnode_new(:,n) + write(*,*) + write(*,*) 'Kv = ',Kv(:,n) + write(*,*) + write(*,*) 'W = ',Wvel(:,n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) + write(*,*) + write(*,*) +!$OMP END CRITICAL + endif ! --> if ( (tracers%data(1)%values(nz, n) /= tracers%data(1)%values(nz, n)) .or. & ... + + !_______________________________________________________________ + ! check salt + if ( (tracers%data(2)%values(nz, n) /= tracers%data(2)%values(nz, n)) .or. & + tracers%data(2)%values(nz, n) < 0 .or. 
tracers%data(2)%values(nz, n)>50 ) then +!$OMP CRITICAL + found_blowup_loc=1 + write(*,*) '___CHECK FOR BLOW UP___________ --> mstep=',istep + write(*,*) ' --STOP--> found salinity becomes NaN or <0, >50' + write(*,*) 'mype = ',mype + write(*,*) 'mstep = ',istep + write(*,*) 'node = ',n + write(*,*) 'nz = ',nz + write(*,*) 'nzmin, nzmax= ',ulevels_nod2D(n),nlevels_nod2D(n) + write(*,*) 'x=', geo_coord_nod2D(1,n)/rad, ' ; ', 'y=', geo_coord_nod2D(2,n)/rad + write(*,*) 'salt(nz, n) = ',tracers%data(2)%values(nz, n) + write(*,*) 'salt(: , n) = ',tracers%data(2)%values(:, n) + write(*,*) + write(*,*) 'temp(nz, n) = ',tracers%data(1)%values(nz, n) + write(*,*) 'temp(: , n) = ',tracers%data(1)%values(:, n) + write(*,*) + write(*,*) 'hflux = ',heat_flux(n) + write(*,*) + write(*,*) 'wflux = ',water_flux(n) + write(*,*) 'eta_n = ',eta_n(n) + write(*,*) 'd_eta(n) = ',d_eta(n) + write(*,*) 'hbar = ',hbar(n) + write(*,*) 'hbar_old = ',hbar_old(n) + write(*,*) 'ssh_rhs = ',ssh_rhs(n) + write(*,*) 'ssh_rhs_old = ',ssh_rhs_old(n) + write(*,*) + write(*,*) 'hnode = ',hnode(:,n) + write(*,*) 'hnode_new = ',hnode_new(:,n) + write(*,*) + write(*,*) 'zbar_3d_n = ',zbar_3d_n(:,n) + write(*,*) 'Z_3d_n = ',Z_3d_n(:,n) + write(*,*) + write(*,*) 'Kv = ',Kv(:,n) + write(*,*) + do el=1,nod_in_elem2d_num(n) + elidx = nod_in_elem2D(el,n) + write(*,*) ' elem#=',el,', elemidx=',elidx + write(*,*) ' Av =',Av(:,elidx) + enddo + write(*,*) 'Wvel = ',Wvel(:,n) + write(*,*) + write(*,*) 'CFL_z(:,n) = ',CFL_z(:,n) + write(*,*) + write(*,*) 'glon,glat = ',geo_coord_nod2D(:,n)/rad + write(*,*) +!$OMP END CRITICAL + endif ! --> if ( (tracers%data(2)%values(nz, n) /= tracers%data(2)%values(nz, n)) .or. & ... + end do ! --> do nz=1,nlevels_nod2D(n)-1 + end do ! --> do n=1, myDim_nod2d +!$OMP END PARALLEL DO + !_______________________________________________________________________ + ! check globally if one of the cpus hat a blowup situation. if its the + ! case CPU mype==0 needs to write out the stuff. 
Write out occurs in + ! moment only over CPU mype==0 + call MPI_AllREDUCE(found_blowup_loc , found_blowup , 1, MPI_INTEGER, MPI_MAX, MPI_COMM_FESOM, MPIerr) + if (found_blowup==1) then + call write_step_info(istep, 1, ice, dynamics, tracers, partit, mesh) + if (mype==0) then + call sleep(1) + write(*,*) + write(*,*) ' MODEL BLOW UP !!!' + write(*,*) ' ____' + write(*,*) ' __,-~~/~ `---.' + write(*,*) ' _/_,---( , )' + write(*,*) ' __ / < / ) \___' + write(*,*) '- -- ----===;;;`====------------------===;;;===---- -- -' + write(*,*) ' \/ ~"~"~"~"~"~\~"~)~"/' + write(*,*) ' (_ ( \ ( > \)' + write(*,*) ' \_( _ < >_>`' + write(*,*) ' ~ `-i` ::>|--"' + write(*,*) ' I;|.|.|' + write(*,*) ' <|i::|i|`' + write(*,*) ' (` ^`"`- ")' + write(*,*) ' _____.,-#%&$@%#&#~,._____' + write(*,*) + end if + call blowup(istep, ice, dynamics, tracers, partit, mesh) + if (mype==0) write(*,*) ' --> finished writing blow up file' + call par_ex(partit%MPI_COMM_FESOM, partit%mype) + endif +end subroutine +!=============================================================================== diff --git a/test.sh b/test.sh new file mode 100755 index 000000000..81fe8ec34 --- /dev/null +++ b/test.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Run simples FESOM2 test in a container. +# +# With singularity on ollie +# +# module load singularity/3.5.1 +# cd fesom2 +# singularity exec /home/ollie/nkolduno/SINGULARITY/fesom_refactoring.sif ./test.sh +# +# With docker on Linux/Mac +# docker run -it -v "$(pwd)"/fesom2:/fesom/fesom2 koldunovn/fesom2_test:refactoring /bin/bash +# cd fesom2 +# ./test.sh +# + +set -e + +machine="docker" +tests="test_pi" + +for test in $tests; do + + ./configure.sh ubuntu + echo $test + mkrun pi $test -m $machine + cd work_pi + chmod +x job_docker_new + ./job_docker_new + fcheck . 
+ cd ../ + +done diff --git a/test/fortran/CMakeLists.txt b/test/fortran/CMakeLists.txt index 47bb252b1..a6d1212a9 100644 --- a/test/fortran/CMakeLists.txt +++ b/test/fortran/CMakeLists.txt @@ -12,6 +12,12 @@ add_library(${LIB_TARGET} ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_a ${CMAKE_CURRENT_LIST_DIR}/../../src/async_threads_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_provider_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/forcing_lookahead_reader_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_nf_interface.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_file_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_attribute_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/io_fesom_file.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/gen_modules_partitioning.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_gather.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_scatter.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/io_netcdf_workaround_module.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/mpi_topology_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/MOD_MESH.F90 ${CMAKE_CURRENT_LIST_DIR}/../../src/oce_modules.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../src/fortran_utils.F90 ) add_subdirectory(../../src/async_threads_cpp ${PROJECT_BINARY_DIR}/async_threads_cpp) @@ -21,10 +27,9 @@ target_link_libraries(${LIB_TARGET} ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRA target_link_libraries(${LIB_TARGET} async_threads_cpp) set_target_properties(${LIB_TARGET} PROPERTIES Fortran_MODULE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) +file(GLOB sources_pfunit RELATIVE ${CMAKE_CURRENT_LIST_DIR} ${CMAKE_CURRENT_LIST_DIR}/*.pf) add_pfunit_ctest (${PROJECT_NAME} - TEST_SOURCES forcing_provider_module_tests.pf # must be a path relative to CMAKE_CURRENT_SOURCE_DIR, then the generated files will be placed in CMAKE_CURRENT_BINARY_DIR (see add_pfunit_ctest.cmake) - forcing_provider_netcdf_module_tests.pf - 
forcing_lookahead_reader_module_tests.pf + TEST_SOURCES ${sources_pfunit} # must be a path relative to CMAKE_CURRENT_SOURCE_DIR, then the generated files will be placed in CMAKE_CURRENT_BINARY_DIR (see add_pfunit_ctest.cmake) LINK_LIBRARIES ${LIB_TARGET} ) @@ -34,6 +39,10 @@ if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel) elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU) target_compile_options(${LIB_TARGET} PRIVATE -cpp -ffree-line-length-none) target_compile_options(${PROJECT_NAME} PRIVATE -cpp -ffree-line-length-none) + if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) + target_compile_options(${LIB_TARGET} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + endif() endif() add_custom_command( diff --git a/test/fortran/fixtures/io_netcdf/columnwise_2d_sss.nc b/test/fortran/fixtures/io_netcdf/columnwise_2d_sss.nc new file mode 100644 index 000000000..33e9f0d2a Binary files /dev/null and b/test/fortran/fixtures/io_netcdf/columnwise_2d_sss.nc differ diff --git a/test/fortran/fixtures/io_netcdf/columnwise_3d_salt.nc b/test/fortran/fixtures/io_netcdf/columnwise_3d_salt.nc new file mode 100644 index 000000000..770e4ce1e Binary files /dev/null and b/test/fortran/fixtures/io_netcdf/columnwise_3d_salt.nc differ diff --git a/test/fortran/forcing_provider_netcdf_module_tests.pf b/test/fortran/forcing_provider_netcdf_module_tests.pf index 21b1eaa65..109083bd5 100644 --- a/test/fortran/forcing_provider_netcdf_module_tests.pf +++ b/test/fortran/forcing_provider_netcdf_module_tests.pf @@ -53,7 +53,7 @@ contains @assertEqual(0.0000, values(1,1), tolerance=1.e-6) @assertEqual(0.0001, values(2,1), tolerance=1.e-6) 
@assertEqual(0.0007, values(2,3), tolerance=1.e-6) - + call handle%finalize() end subroutine diff --git a/test/fortran/fortran_utils_tests.pf b/test/fortran/fortran_utils_tests.pf new file mode 100644 index 000000000..0ca78da2b --- /dev/null +++ b/test/fortran/fortran_utils_tests.pf @@ -0,0 +1,44 @@ +module fortran_utils_tests + use fortran_utils + use funit; implicit none + +contains + + + @test + subroutine test_2_digits_results_in_2_characters_string() + @assertEqual("12", int_to_txt(12)) + end subroutine + + + @test + subroutine test_1_digit_results_in_1_character_string + @assertEqual("1", int_to_txt(1)) + end subroutine + + + @test + subroutine test_0_results_in_0_character_string + @assertEqual("0", int_to_txt(0)) + end subroutine + + + @test + subroutine test_1_digit_padded_to_3_results_in_3_character_string + @assertEqual("001", int_to_txt_pad(1,3)) + end subroutine + + @test + subroutine test_3_digit_padded_to_1_results_in_3_character_string + @assertEqual("123", int_to_txt_pad(123,1)) + end subroutine + + + @test + subroutine test_0_padded_to_0_results_in_0_character_string + @assertEqual("0", int_to_txt_pad(0,0)) + end subroutine + + + +end module diff --git a/test/fortran/io_fesom_file_module_tests.pf b/test/fortran/io_fesom_file_module_tests.pf new file mode 100644 index 000000000..54eee817f --- /dev/null +++ b/test/fortran/io_fesom_file_module_tests.pf @@ -0,0 +1,81 @@ +module io_fesom_file_module_tests + use io_fesom_file_module + use funit; implicit none + + character(len=*), parameter :: TMPPATHPREFIX = "./io_fesom_file_module_tests_DAEA1C34_F042_4243_AA88_273E4AA9D4A6__" + +contains + + + @test + subroutine can_be_initialized() + type(fesom_file_type) f + integer, parameter :: mesh_nod2d = 3140 + integer, parameter :: mesh_elem2d = 5839 + integer, parameter :: mesh_nl = 48 + + call f%init(mesh_nod2d, mesh_elem2d, mesh_nl) + end subroutine + + + @test + subroutine rec_count_returns_neg1_for_an_unattached_file() + type(fesom_file_type) f + integer, 
parameter :: mesh_nod2d = 3140 + integer, parameter :: mesh_elem2d = 5839 + integer, parameter :: mesh_nl = 48 + + call f%init(mesh_nod2d, mesh_elem2d, mesh_nl) + + @assertEqual(-1, f%rec_count()) + end subroutine + + + @test + subroutine rec_count_returns_0_for_a_newly_created_file() + character(len=*), parameter :: filepath = TMPPATHPREFIX//"rec_count_returns_0_for_a_newly_created_file.nc" + integer exitstat + type(fesom_file_type) f + integer, parameter :: mesh_nod2d = 3140 + integer, parameter :: mesh_elem2d = 5839 + integer, parameter :: mesh_nl = 48 + + call execute_command_line("rm -f "//filepath) ! silently remove the file if it exists from an aborted previous run + + call f%init(mesh_nod2d, mesh_elem2d, mesh_nl) + call f%open_write_create(filepath) + + @assertEqual(0, f%rec_count()) + + call f%close_file() + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine rec_count_returns_2_for_existing_file_with_2_timesteps() + type(fesom_file_type) f + integer, parameter :: mesh_nod2d = 3140 + integer, parameter :: mesh_elem2d = 5839 + integer, parameter :: mesh_nl = 48 + + integer, allocatable :: varshape(:) + integer node_dimidx, time_dimidx + integer varindex + + call f%init(mesh_nod2d, mesh_elem2d, mesh_nl) + + time_dimidx = f%add_dim_unlimited("time") + node_dimidx = f%add_dim("nod2", 5) + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + call f%read_var_shape(varindex, varshape) + + @assertEqual(2, f%rec_count()) + + call f%close_file() + end subroutine + +end module diff --git a/test/fortran/io_netcdf_file_module_tests.pf b/test/fortran/io_netcdf_file_module_tests.pf new file mode 100644 index 000000000..8a1f80492 --- /dev/null +++ b/test/fortran/io_netcdf_file_module_tests.pf @@ -0,0 +1,747 @@ +module io_netcdf_file_module_tests + use io_netcdf_file_module + use funit; implicit none + + character(len=*), 
parameter :: TMPPATHPREFIX = "./io_netcdf_file_module_tests_DAEA1C34_F042_4243_AA88_273E4AA9D4A6__" + +contains + + ! utility procedure to grep a NetCDF file for a string + function is_in_file_header(filepath, searchtext) result(is_in) + character(len=*), intent(in) :: filepath + character(len=*), intent(in) :: searchtext + logical is_in + ! EO parameters + integer exitstat + + call execute_command_line("ncdump -h -p9,17 "//filepath//" | grep -q '"//searchtext//"'", exitstat=exitstat) + is_in = (exitstat == 0) + end function + + + ! utility procedure to grep a NetCDF file for a string + function is_in_file(filepath, searchtext) result(is_in) + character(len=*), intent(in) :: filepath + character(len=*), intent(in) :: searchtext + logical is_in + ! EO parameters + integer exitstat + + call execute_command_line("ncdump -p9,17 "//filepath//" | grep -q '"//searchtext//"'", exitstat=exitstat) + is_in = (exitstat == 0) + end function + + + @test + subroutine test_can_initialize_without_filepath() + type(netcdf_file_type) f + + call f%initialize() + end subroutine + + + @test + subroutine test_can_add_dims() + type(netcdf_file_type) f + integer nz_dimidx, node_dimidx + + call f%initialize() + nz_dimidx = f%add_dim("nz", 47) + @assertEqual(1, nz_dimidx) + node_dimidx = f%add_dim("node", 47) + @assertEqual(2, node_dimidx) + end subroutine + + + @test + subroutine test_can_add_unlimited_dim() + type(netcdf_file_type) f + integer dimidx + + call f%initialize() + dimidx = f%add_dim_unlimited("time") + @assertEqual(1, dimidx) + end subroutine + + + @test + subroutine test_can_query_ndims() + type(netcdf_file_type) f + integer nz_dimidx, node_dimidx + + call f%initialize() + nz_dimidx = f%add_dim("nz", 48) + node_dimidx = f%add_dim("nz_1", 47) + + @assertEqual(2, f%ndims()) + + end subroutine + + + @test + subroutine test_can_add_vars_with_attributes() + type(netcdf_file_type) f + integer nz_dimidx, node_dimidx + integer salt_varid + + call f%initialize() + nz_dimidx = 
f%add_dim("nz", 47) + @assertEqual(1, nz_dimidx) + node_dimidx = f%add_dim("node", 47) + @assertEqual(2, node_dimidx) + + salt_varid = f%add_var_real("salt", [1,2]) + call f%add_var_att(salt_varid, "units", "psu") + call f%add_var_att(salt_varid, "long_name", "salinity") + end subroutine + + + @test + subroutine test_can_add_global_attribute_text() + type(netcdf_file_type) f + + call f%initialize() + call f%add_global_att("FESOM_model", "FESOM2") + end subroutine + + + @test + subroutine test_can_add_global_attribute_int() + type(netcdf_file_type) f + + call f%initialize() + call f%add_global_att("FESOM_force_rotation", 0) + end subroutine + + + @test + subroutine test_can_open_file_in_readmode_without_expecting_dims_and_vars() + type(netcdf_file_type) f + + call f%initialize() + call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") + call f%close_file() + end subroutine + + + @test + subroutine test_can_open_file_with_unlimited_dim() + type(netcdf_file_type) f + integer dimidx + + call f%initialize() + dimidx = f%add_dim_unlimited("time") + + call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") + + call f%close_file() + end subroutine + + + @test + subroutine test_can_open_file_with_variable() + type(netcdf_file_type) f + integer nz_dimidx, node_dimidx, time_dimidx + integer salt_varid + call f%initialize() + nz_dimidx = f%add_dim("nz1", 3) + @assertEqual(1, nz_dimidx) + node_dimidx = f%add_dim("nod2", 5) + @assertEqual(2, node_dimidx) + time_dimidx = f%add_dim_unlimited("time") + @assertEqual(3, time_dimidx) + + salt_varid = f%add_var_real("salt", [nz_dimidx,node_dimidx,time_dimidx]) + call f%add_var_att(salt_varid, "units", "psu") + call f%add_var_att(salt_varid, "long_name", "salinity") + + call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_2d_variable_real4() + type(netcdf_file_type) f + real(4), allocatable :: values(:) + + integer node_dimidx, time_dimidx 
+ integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + allocate(values(5)) + call f%read_var(sss_varindex, [1,1], [5,1], values) + ! check level 1 values + @assertEqual(1.001, values(1), tolerance=1.e-6) + @assertEqual(1.002, values(2), tolerance=1.e-6) + @assertEqual(1.003, values(3), tolerance=1.e-6) + @assertEqual(1.004, values(4), tolerance=1.e-6) + @assertEqual(1.005, values(5), tolerance=1.e-6) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_2d_variable_real8() + type(netcdf_file_type) f + real(8), allocatable :: values(:) + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + allocate(values(5)) + call f%read_var(sss_varindex, [1,1], [5,1], values) + ! 
check level 1 values + @assertEqual(1.001, values(1), tolerance=1.e-6) + @assertEqual(1.002, values(2), tolerance=1.e-6) + @assertEqual(1.003, values(3), tolerance=1.e-6) + @assertEqual(1.004, values(4), tolerance=1.e-6) + @assertEqual(1.005, values(5), tolerance=1.e-6) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_3d_variable_real4() + type(netcdf_file_type) f + real(4), allocatable :: values(:,:) + + integer node_dimidx, time_dimidx, z_dimidx + integer varindex + + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + z_dimidx = f%add_dim("nz1", 3) + time_dimidx = f%add_dim_unlimited("time") + + varindex = f%add_var_real("salt", [z_dimidx, node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") + + allocate(values(3,5)) + call f%read_var(varindex, [1,1,1], [3,5,1], values) + ! check level 1 values + @assertEqual(1.001, values(1,1), tolerance=1.e-6) + @assertEqual(1.002, values(1,2), tolerance=1.e-6) + @assertEqual(1.003, values(1,3), tolerance=1.e-6) + @assertEqual(1.004, values(1,4), tolerance=1.e-6) + @assertEqual(1.005, values(1,5), tolerance=1.e-6) + + ! 
check level 2 values + @assertEqual(2.001, values(2,1), tolerance=1.e-6) + @assertEqual(2.002, values(2,2), tolerance=1.e-6) + @assertEqual(2.003, values(2,3), tolerance=1.e-6) + @assertEqual(2.004, values(2,4), tolerance=1.e-6) + @assertEqual(2.005, values(2,5), tolerance=1.e-6) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_3d_variable_real8() + type(netcdf_file_type) f + real(8), allocatable :: values(:,:) + + integer node_dimidx, time_dimidx, z_dimidx + integer varindex + + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + z_dimidx = f%add_dim("nz1", 3) + time_dimidx = f%add_dim_unlimited("time") + + varindex = f%add_var_real("salt", [z_dimidx, node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_3d_salt.nc") + + allocate(values(3,5)) + call f%read_var(varindex, [1,1,1], [3,5,1], values) + ! check level 1 values + @assertEqual(1.001, values(1,1), tolerance=1.e-6) + @assertEqual(1.002, values(1,2), tolerance=1.e-6) + @assertEqual(1.003, values(1,3), tolerance=1.e-6) + @assertEqual(1.004, values(1,4), tolerance=1.e-6) + @assertEqual(1.005, values(1,5), tolerance=1.e-6) + + ! check level 2 values + @assertEqual(2.001, values(2,1), tolerance=1.e-6) + @assertEqual(2.002, values(2,2), tolerance=1.e-6) + @assertEqual(2.003, values(2,3), tolerance=1.e-6) + @assertEqual(2.004, values(2,4), tolerance=1.e-6) + @assertEqual(2.005, values(2,5), tolerance=1.e-6) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_2d_variable_integer() + type(netcdf_file_type) f + integer, allocatable :: values(:) + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + allocate(values(5)) + call f%read_var(sss_varindex, [1,2], [5,1], values) + ! 
check level 1 values + @assertEqual(10, values(1)) + @assertEqual(10, values(2)) + @assertEqual(10, values(3)) + @assertEqual(10, values(4)) + @assertEqual(10, values(5)) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_single_variable_integer() + type(netcdf_file_type) f + integer value + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var1(sss_varindex, [1,2], value) + ! check level 1 values + @assertEqual(10, value) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_single_variable_real4() + type(netcdf_file_type) f + real(4) value + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var1(sss_varindex, [1,2], value) + ! check level 1 values + @assertEqual(10.001, value, tolerance=1.e-6) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_single_variable_real8() + type(netcdf_file_type) f + real(8) value + + integer node_dimidx, time_dimidx + integer sss_varindex + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + sss_varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var1(sss_varindex, [1,2], value) + ! 
check level 1 values + @assertEqual(10.001_8, value, tolerance=1.e-6) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_variable_with_less_dims_than_in_file() + type(netcdf_file_type) f + real(8) value + + integer node_dimidx, time_dimidx, unused_dimidx + integer varindex + call f%initialize() + ! 2 dims in file + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + ! read var with 1 dim + varindex = f%add_var_double("time", [time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var(varindex, [1], [1], value) + @assertEqual(10800.0_8, value, tolerance=1.e-6) + + call f%close_file() + end subroutine + + + @test + subroutine test_can_create_empty_file() + type(netcdf_file_type) f + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_empty_file.nc" + + call execute_command_line("rm -f "//filepath) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + call f%open_write_create(filepath) + call f%close_file() + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, '}') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_create_file_with_global_attributes_text() + type(netcdf_file_type) f + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_with_global_attributes_text.nc" + + call execute_command_line("rm -f "//filepath) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + call f%add_global_att("FESOM_model", "FESOM2") + call f%open_write_create(filepath) + call f%close_file() + + ! 
test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, ':FESOM_model = "FESOM2" ;') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_create_file_with_global_attributes_int() + type(netcdf_file_type) f + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_with_global_attributes_int.nc" + + call execute_command_line("rm -f "//filepath) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + call f%add_global_att("FESOM_force_rotation", 0) + call f%open_write_create(filepath) + call f%close_file() + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, ':FESOM_force_rotation = 0 ;') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_create_file_with_dims_and_vars() + type(netcdf_file_type) f + integer z_dimidx, time_dimidx + integer varindex, time_varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_with_dims_and_vars.nc" + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + z_dimidx = f%add_dim("nz1", 3) + time_dimidx = f%add_dim_unlimited("time") + time_varindex = f%add_var_double("time", [time_dimidx]) + varindex = f%add_var_real("salt", [z_dimidx, time_dimidx]) + + call f%open_write_create(filepath) + call f%close_file() + + ! 
test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, 'nz1 = 3 ;') ) + @assertTrue( is_in_file_header(filepath, 'time = UNLIMITED') ) + @assertTrue( is_in_file_header(filepath, 'double time(time) ;') ) + @assertTrue( is_in_file_header(filepath, 'float salt(time, nz1) ;') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_create_file_and_var_text_attributes() + type(netcdf_file_type) f + integer z_dimidx, time_dimidx + integer varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_and_var_text_attributes.nc" + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run + + call f%initialize() + z_dimidx = f%add_dim("nz1", 3) + time_dimidx = f%add_dim_unlimited("time") + varindex = f%add_var_real("salt", [z_dimidx, time_dimidx]) + call f%add_var_att(varindex, "units", "psu") + call f%add_var_att(varindex, "long_name", "sea surface salinity") + + call f%open_write_create(filepath) + call f%close_file() + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, 'salt:units = "psu"') ) + @assertTrue( is_in_file_header(filepath, 'salt:long_name = "sea surface salinity"') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_create_file_and_var_int_attributes() + type(netcdf_file_type) f + integer z_dimidx, time_dimidx + integer varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_create_file_and_var_int_attributes.nc" + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! 
silently remove the file if it exists from an aborted previous run + + call f%initialize() + z_dimidx = f%add_dim("nz1", 3) + time_dimidx = f%add_dim_unlimited("time") + varindex = f%add_var_real("salt", [z_dimidx, time_dimidx]) + call f%add_var_att(varindex, "number", 42) + + call f%open_write_create(filepath) + call f%close_file() + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, 'salt:number = 42 ;') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_write_to_new_file_int() + type(netcdf_file_type) f + + integer time_dimidx + integer varindex, time_varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_write_to_new_file_int.nc" + + call f%initialize() + time_dimidx = f%add_dim_unlimited("time") + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run + + time_varindex = f%add_var_double("time", [time_dimidx]) + varindex = f%add_var_int("iter", [time_dimidx]) + call f%open_write_create(filepath) + + call f%write_var(time_varindex, [1], [1], [10800.0_8]) + call f%write_var(varindex, [1], [1], [42]) + + call f%close_file() + + @assertTrue( is_in_file(filepath, 'iter = 42 ;') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_append_to_existing_file_real4() + type(netcdf_file_type) f + real(4), allocatable :: values(:) + + integer node_dimidx, time_dimidx + integer varindex, time_varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_append_to_existing_file_real4.nc" + + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! 
silently remove the file if it exists from an aborted previous run + + call execute_command_line("cp fixtures/io_netcdf/columnwise_2d_sss.nc "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + + time_varindex = f%add_var_double("time", [time_dimidx]) + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_write_append(filepath) + + allocate(values(5)) + values(1) = 100.001 + values(2) = 100.002 + values(3) = 100.003 + values(4) = 100.004 + values(5) = 100.005 + + ! the file has 2 timesteps, we append a 3rd one + call f%write_var(time_varindex, [3], [1], [32400.0_8]) + call f%write_var(varindex, [1,3], [5,1], values) + + call f%close_file() + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, 'time = UNLIMITED ; // (3 currently)') ) + ! todo: check if the values have been written correctly + @assertTrue( is_in_file(filepath, '32400') ) + @assertTrue( is_in_file(filepath, '100.00') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_append_to_existing_file_real8() + type(netcdf_file_type) f + real(8), allocatable :: values(:) + + integer node_dimidx, time_dimidx + integer varindex, time_varindex + integer exitstat + character(len=*), parameter :: filepath = TMPPATHPREFIX//"test_can_append_to_existing_file_real8.nc" + + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + call execute_command_line("rm -f "//filepath, exitstat=exitstat) ! silently remove the file if it exists from an aborted previous run + + call execute_command_line("cp fixtures/io_netcdf/columnwise_2d_sss.nc "//filepath, exitstat=exitstat) + if(exitstat .ne. 
0) stop 1 + + time_varindex = f%add_var_double("time", [time_dimidx]) + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_write_append(filepath) + + allocate(values(5)) + values(1) = 100.001_8 + values(2) = 100.002_8 + values(3) = 100.003_8 + values(4) = 100.004_8 + values(5) = 100.005_8 + + ! the file has 2 timesteps, we append a 3rd one + call f%write_var(time_varindex, [3], [1], [32400.0_8]) + call f%write_var(varindex, [1,3], [5,1], values) + + call f%close_file() + + ! test if the file has been written correctly + @assertTrue( is_in_file_header(filepath, 'time = UNLIMITED ; // (3 currently)') ) + ! todo: check if the values have been written correctly + @assertTrue( is_in_file(filepath, '32400') ) + @assertTrue( is_in_file(filepath, '100.00') ) + + call execute_command_line("rm "//filepath, exitstat=exitstat) + if(exitstat .ne. 0) stop 1 + end subroutine + + + @test + subroutine test_can_read_var_shape_from_file() + type(netcdf_file_type) f + integer, allocatable :: varshape(:) + integer node_dimidx, time_dimidx + integer varindex + + call f%initialize() + node_dimidx = f%add_dim("nod2", 5) + time_dimidx = f%add_dim_unlimited("time") + + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var_shape(varindex, varshape) + @assertEqual([5,2], varshape) + call f%close_file() + end subroutine + + + @test + subroutine test_can_read_var_shape_from_file_with_reverse_dim_index_order() + type(netcdf_file_type) f + integer, allocatable :: varshape(:) + integer node_dimidx, time_dimidx + integer varindex + + call f%initialize() + time_dimidx = f%add_dim_unlimited("time") + node_dimidx = f%add_dim("nod2", 5) + + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + call f%read_var_shape(varindex, varshape) + @assertEqual([5,2], varshape) + call f%close_file() + end subroutine + + + @test + 
subroutine test_file_is_attached_is_false_after_initializing() + type(netcdf_file_type) f + + call f%initialize() + @assertFalse(f%is_attached()) + end subroutine + + + @test + subroutine test_file_is_attached_is_true_after_opening_a_file() + type(netcdf_file_type) f + integer, allocatable :: varshape(:) + integer node_dimidx, time_dimidx + integer varindex + + call f%initialize() + time_dimidx = f%add_dim_unlimited("time") + node_dimidx = f%add_dim("nod2", 5) + + varindex = f%add_var_real("sss", [node_dimidx,time_dimidx]) + + call f%open_read("fixtures/io_netcdf/columnwise_2d_sss.nc") + + call f%read_var_shape(varindex, varshape) + @assertTrue(f%is_attached()) + call f%close_file() + end subroutine + +end module diff --git a/test/fortran/io_netcdf_module_tests.pf b/test/fortran/io_netcdf_module_tests.pf new file mode 100644 index 000000000..c70d9f1c2 --- /dev/null +++ b/test/fortran/io_netcdf_module_tests.pf @@ -0,0 +1,122 @@ +module io_netcdf_module_tests + use io_netcdf_module + use funit; implicit none + +contains + + + @test + subroutine test_can_read_2d_variable_real8() + type(netcdf_variable_handle) handle + real(8), allocatable :: values(:) + + call handle%initialize("fixtures/io_netcdf/columnwise_2d_sss.nc", "sss") + + allocate(values(5)) + + call handle%read_values(1, values) + ! check level 1 values + @assertEqual(1.001, values(1), tolerance=1.e-6) + @assertEqual(1.002, values(2), tolerance=1.e-6) + @assertEqual(1.003, values(3), tolerance=1.e-6) + @assertEqual(1.004, values(4), tolerance=1.e-6) + @assertEqual(1.005, values(5), tolerance=1.e-6) + + call handle%finalize() + end subroutine + + + @test + subroutine test_can_read_2d_variable_real4() + type(netcdf_variable_handle) handle + real(4), allocatable :: values(:) + + call handle%initialize("fixtures/io_netcdf/columnwise_2d_sss.nc", "sss") + + allocate(values(5)) + + call handle%read_values(1, values) + ! 
check level 1 values + @assertEqual(1.001, values(1), tolerance=1.e-6) + @assertEqual(1.002, values(2), tolerance=1.e-6) + @assertEqual(1.003, values(3), tolerance=1.e-6) + @assertEqual(1.004, values(4), tolerance=1.e-6) + @assertEqual(1.005, values(5), tolerance=1.e-6) + + call handle%finalize() + end subroutine + + + @test + subroutine test_can_read_3d_variable_real4() + type(netcdf_variable_handle) handle + real(4), allocatable :: values(:,:) + + call handle%initialize("fixtures/io_netcdf/columnwise_3d_salt.nc", "salt") + + allocate(values(3,5)) + + call handle%read_values(1, values) + ! check level 1 values + @assertEqual(1.001, values(1,1), tolerance=1.e-6) + @assertEqual(1.002, values(1,2), tolerance=1.e-6) + @assertEqual(1.003, values(1,3), tolerance=1.e-6) + @assertEqual(1.004, values(1,4), tolerance=1.e-6) + @assertEqual(1.005, values(1,5), tolerance=1.e-6) + + ! check level 2 values + @assertEqual(2.001, values(2,1), tolerance=1.e-6) + @assertEqual(2.002, values(2,2), tolerance=1.e-6) + @assertEqual(2.003, values(2,3), tolerance=1.e-6) + @assertEqual(2.004, values(2,4), tolerance=1.e-6) + @assertEqual(2.005, values(2,5), tolerance=1.e-6) + + call handle%finalize() + end subroutine + + + @test + subroutine test_can_read_number_of_timesteps() + type(netcdf_variable_handle) handle + + call handle%initialize("fixtures/io_netcdf/columnwise_3d_salt.nc", "salt") + @assertEqual(2,handle%number_of_timesteps()) + + call handle%finalize() + end subroutine + + + @test + subroutine test_can_read_number_of_dimensions() + type(netcdf_variable_handle) handle + + call handle%initialize("fixtures/io_netcdf/columnwise_3d_salt.nc", "salt") + @assertEqual(3,handle%number_of_dims()) + + call handle%finalize() + end subroutine + + + @test + subroutine test_can_read_dimension_size_at_index() + type(netcdf_variable_handle) handle + + call handle%initialize("fixtures/io_netcdf/columnwise_3d_salt.nc", "salt") + @assertEqual(3,handle%dimsize_at(1)) + 
@assertEqual(5,handle%dimsize_at(2)) + @assertEqual(2,handle%dimsize_at(3)) + + call handle%finalize() + end subroutine + + + @test + subroutine test_can_initialize_netcdf_variable_for_existing_file() + type(netcdf_variable_handle) handle + + call handle%initialize("fixtures/io_netcdf/columnwise_3d_salt.nc", "salt") + call handle%finalize() + end subroutine + + +end module diff --git a/test/fortran_parallel/CMakeLists.txt b/test/fortran_parallel/CMakeLists.txt index 88cc7cd34..95dfb08f6 100644 --- a/test/fortran_parallel/CMakeLists.txt +++ b/test/fortran_parallel/CMakeLists.txt @@ -11,8 +11,9 @@ target_include_directories(${LIB_TARGET} PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ${C target_link_libraries(${LIB_TARGET} ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRARIES}) set_target_properties(${LIB_TARGET} PROPERTIES Fortran_MODULE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) +file(GLOB sources_pfunit RELATIVE ${CMAKE_CURRENT_LIST_DIR} ${CMAKE_CURRENT_LIST_DIR}/*.pf) add_pfunit_ctest (${PROJECT_NAME} - TEST_SOURCES mpi_topology_module_tests.pf # must be a path relative to CMAKE_CURRENT_SOURCE_DIR, then the generated files will be placed in CMAKE_CURRENT_BINARY_DIR (see add_pfunit_ctest.cmake) + TEST_SOURCES ${sources_pfunit} # must be a path relative to CMAKE_CURRENT_SOURCE_DIR, then the generated files will be placed in CMAKE_CURRENT_BINARY_DIR (see add_pfunit_ctest.cmake) LINK_LIBRARIES ${LIB_TARGET} MAX_PES 6 ) diff --git a/test/ifs_interface/check_exist.sh b/test/ifs_interface/check_exist.sh new file mode 100755 index 000000000..e89a674e1 --- /dev/null +++ b/test/ifs_interface/check_exist.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e + +FILE=./lib/libfesom.a +if [ -f "$FILE" ]; then + echo "$FILE compiled and linked." + exit 0 +else + echo "$FILE does not exist." 
+ exit 1 +fi diff --git a/test/ifs_interface/configure_lib.sh b/test/ifs_interface/configure_lib.sh new file mode 100755 index 000000000..cf1609bcb --- /dev/null +++ b/test/ifs_interface/configure_lib.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# custom build script in use at ECMWF + +set -e + +LIB=no +while getopts "l" OPT +do + case "$OPT" in + l) LIB=yes;; + esac +done +shift $((OPTIND-1)) + +#cd ../../ +source env.sh ubuntu # source this from your run script too + +if [[ ${LIB} = yes ]]; then + mkdir build.lib || true # build dir for library + cd build.lib + cmake -DBUILD_FESOM_AS_LIBRARY=ON .. # not required when re-compiling +else + mkdir build || true # build dir for binary + cd build + cmake .. # not required when re-compiling +fi +make install -j`nproc --all` diff --git a/test/run_tests.sh b/test/run_tests.sh new file mode 100755 index 000000000..644ca21b8 --- /dev/null +++ b/test/run_tests.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -e +cd ../ + +machine="docker" +tests="test_pi test_souf test_pi_linfs test_pi_zstar test_pi_partial test_pi_floatice test_pi_visc7" + +./configure.sh ubuntu + +for test in $tests; do + +echo $test + mkrun pi $test -m $machine + pwd + cd work_pi + chmod +x job_docker_new + ./job_docker_new + echo "This was ${test}" + fcheck . + cd ../ + +done + +othertest="test_lib_compiles" + +for test in $othertest; do + + echo $othertest + ./test/ifs_interface/configure_lib.sh -l + + FILE=./lib/libfesom.a + if [ -f "$FILE" ]; then + echo "$FILE compiled and linked." + else + echo "$FILE does not exist." 
+ fi +done diff --git a/test/scalability/read_netcdf/CMakeLists.txt b/test/scalability/read_netcdf/CMakeLists.txt new file mode 100644 index 000000000..1986d5c1a --- /dev/null +++ b/test/scalability/read_netcdf/CMakeLists.txt @@ -0,0 +1,30 @@ +cmake_minimum_required(VERSION 3.4) + +project(read_netcdf Fortran) + +# get our source files +file(GLOB sources_Fortran ${CMAKE_CURRENT_LIST_DIR}/*.F90) + +include(${CMAKE_CURRENT_LIST_DIR}/../../../cmake/FindNETCDF.cmake) + +add_executable(${PROJECT_NAME} ${sources_Fortran} + ${CMAKE_CURRENT_LIST_DIR}/../../../src/io_netcdf_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../../src/io_netcdf_nf_interface.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../../src/io_netcdf_file_module.F90 + ${CMAKE_CURRENT_LIST_DIR}/../../../src/io_netcdf_attribute_module.F90 + ) + +if(${CMAKE_Fortran_COMPILER_ID} STREQUAL Intel ) + target_compile_options(${PROJECT_NAME} PRIVATE -r8 -i4 -fp-model precise -no-prec-div -no-prec-sqrt -fast-transcendentals -xHost -ip -init=zero -no-wrap-margin) +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL GNU ) + target_compile_options(${PROJECT_NAME} PRIVATE -O3 -finit-local-zero -finline-functions -march=native -fimplicit-none -fdefault-real-8 -ffree-line-length-none) + if(CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10 ) + target_compile_options(${PROJECT_NAME} PRIVATE -fallow-argument-mismatch) # gfortran v10 is strict about erroneous API calls: "Rank mismatch between actual argument at (1) and actual argument at (2) (scalar and rank-1)" + endif() +elseif(${CMAKE_Fortran_COMPILER_ID} STREQUAL Cray ) + target_compile_options(${PROJECT_NAME} PRIVATE -c -emf -hbyteswapio -hflex_mp=conservative -hfp1 -hadd_paren -Ounroll0 -hipa0 -r am -s real64) +endif() + +target_include_directories(${PROJECT_NAME} PRIVATE ${NETCDF_Fortran_INCLUDE_DIRECTORIES}) +target_link_libraries(${PROJECT_NAME} ${NETCDF_Fortran_LIBRARIES} ${NETCDF_C_LIBRARIES}) +set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE Fortran) diff --git 
a/view/scripts/iloveclock.py b/view/scripts/iloveclock.py index f52d632c9..45da4c3f3 100755 --- a/view/scripts/iloveclock.py +++ b/view/scripts/iloveclock.py @@ -16,12 +16,10 @@ Valid calendars 'standard', 'gregorian', 'proleptic_gregorian' 'noleap', '365_day', '360_day', 'julian', 'all_leap', '366_day' -Copyright (c) 2018, FESOM Development Team. +Copyright (c) 2018, 2022 FESOM Development Team. """ from netCDF4 import Dataset, num2date -from datetime import timedelta -from datetime import datetime import sys filename = sys.argv[1] @@ -31,32 +29,19 @@ calendar = '365_day' f = Dataset(filename) -# a = num2date(f.variables['time'][:], f.variables['time'].units, '365_day') print(20*'*') print('CALENDAR: ' + calendar) print(20*'*') for nstamp in range(f.variables['time'].shape[0]): - sstamp = num2date(f.variables['time'][nstamp], f.variables['time'].units, calendar) - delta = (60 - sstamp.minute)*60 - estamp = num2date(f.variables['time'][:][nstamp] + delta, f.variables['time'].units, calendar) - seconds_in_day_s = sstamp.hour*3600+sstamp.minute*60 - seconds_in_day_e = estamp.hour*3600+estamp.minute*60 - - if calendar in ['noleap', '365_day', '360_day', '366_day']: - print(sstamp) - print("{:5d} {:10d} {:10d}".format(seconds_in_day_s, sstamp.dayofyr, sstamp.year)) - print("{:5d} {:10d} {:10d}".format(seconds_in_day_e, estamp.dayofyr, estamp.year)) - print(20*'*') - else: - print(sstamp) - print("{:5d} {:10d} {:10d}".format(seconds_in_day_s, sstamp.timetuple().tm_yday, sstamp.year)) - print("{:5d} {:10d} {:10d}".format(seconds_in_day_e, estamp.timetuple().tm_yday, estamp.year)) - print(20*'*') - -print(20*'*') -print('CALENDAR: ' + calendar) -print(20*'*') + estamp = num2date(f.variables['time'][:][nstamp], f.variables['time'].units, calendar) + sstamp = int(f.variables['time'][nstamp]) + day = (sstamp//86400)+1 + seconds = sstamp%86400 + print(sstamp) + print("{:5d} {:10d} {:10d}".format(seconds, day, estamp.year)) + print("{:5d} {:10d} {:10d}".format(86400, day, 
estamp.year)) + print(20*'*') f.close() \ No newline at end of file diff --git a/view_pscholz/sub_climatology.py b/view_pscholz/sub_climatology.py index aa868775a..dfe5a8aa6 100644 --- a/view_pscholz/sub_climatology.py +++ b/view_pscholz/sub_climatology.py @@ -92,7 +92,7 @@ def __init__(self,path,fname,var=[]): depth3d = np.zeros(valueS.shape) for di in range(0,self.depth.size): depth3d[di,:,:] = self.depth[di] - self.value = sw.ptmp(valueS, valueT, depth3d) + self.value = sw.ptmp(valueS, valueT, np.abs(depth3d)) elif var=='salt': self.value = valueS diff --git a/view_pscholz/sub_fesom_mesh.py b/view_pscholz/sub_fesom_mesh.py index 2aefb37ff..f3fcc336a 100644 --- a/view_pscholz/sub_fesom_mesh.py +++ b/view_pscholz/sub_fesom_mesh.py @@ -130,7 +130,8 @@ def __init__(self,inputarray): #_______________________________________________________________________ # remove+augment periodic boundary - self.fesom_remove_pbnd() + if (inputarray['mesh_remove_cyc' ] == True): + self.fesom_remove_pbnd() # calculate fesom land mask interactivly #self.fesom_calc_landmask() @@ -285,9 +286,9 @@ def fesom_grid_rot_r2g(self,str_mode='r2g'): #_______________________________________________________________________ # make inverse of rotation matrix - if (str_mode == 'r2g') or (str_mode == 'focus'): - from numpy.linalg import inv - rotate_matrix=inv(rotate_matrix); +# if (str_mode == 'r2g') or (str_mode == 'focus'): +# from numpy.linalg import inv +# rotate_matrix=inv(rotate_matrix); #____3D_________________________________________________________________ # calculate Cartesian coordinates @@ -303,9 +304,14 @@ def fesom_grid_rot_r2g(self,str_mode='r2g'): #_______________________________________________________________________ # rotate to geographical cartesian coordinates: - xg=rotate_matrix[0,0]*xr + rotate_matrix[0,1]*yr + rotate_matrix[0,2]*zr; - yg=rotate_matrix[1,0]*xr + rotate_matrix[1,1]*yr + rotate_matrix[1,2]*zr; - zg=rotate_matrix[2,0]*xr + rotate_matrix[2,1]*yr + 
rotate_matrix[2,2]*zr; + if (str_mode == 'r2g') or (str_mode == 'focus'): + xg=rotate_matrix[0,0]*xr + rotate_matrix[1,0]*yr + rotate_matrix[2,0]*zr; + yg=rotate_matrix[0,1]*xr + rotate_matrix[1,1]*yr + rotate_matrix[2,1]*zr; + zg=rotate_matrix[0,2]*xr + rotate_matrix[1,2]*yr + rotate_matrix[2,2]*zr; + else: + xg=rotate_matrix[0,0]*xr + rotate_matrix[0,1]*yr + rotate_matrix[0,2]*zr; + yg=rotate_matrix[1,0]*xr + rotate_matrix[1,1]*yr + rotate_matrix[1,2]*zr; + zg=rotate_matrix[2,0]*xr + rotate_matrix[2,1]*yr + rotate_matrix[2,2]*zr; ##______________________________________________________________________ #self.nodes_2d_yg = np.degrees(np.arcsin(zg)); diff --git a/work/job_ecmwf b/work/job_ecmwf new file mode 100755 index 000000000..287f8e442 --- /dev/null +++ b/work/job_ecmwf @@ -0,0 +1,47 @@ +#!/bin/bash +#PBS -S /usr/bin/ksh +#PBS -N fesom2-LandG +#PBS -q np +#PBS -l EC_total_tasks=288 + +# optionally, specify that no OpenMP is used +#PBS -l EC_threads_per_task=1 + +#PBS -l EC_hyperthreading=1 +#PBS -l EC_user_defined_priority=99 +#PBS -l walltime=00:57:00 + +##PBS -j oe #join out and err +#PBD -n +#PBS -o /scratch/rd/natr/run_core2_LandG/pbs.out +#PBS -e /scratch/rd/natr/run_core2_LandG/pbs.err + +#PBS -m abe +#PBS -M thomas.rackow@ecmwf.int + +#queue suitable for target processors min/max processors per node memory limit wall-clock
#np parallel MOM+CN 1/72 not shared 72 120 GB 48 hours + +path=`pwd` +echo Initial path: $path + +cd /scratch/rd/natr/run_core2_LandG/ + +# debug +set -x + +cp $HOME/fesom2/bin/fesom.x . # +# did manually +#cp -n $HOME/fesom2/config/namelist.config . # +#cp -n $HOME/fesom2/config/namelist.cvmix . # +#cp -n $HOME/fesom2/config/namelist.forcing . # +#cp -n $HOME/fesom2/config/namelist.oce . # +#cp -n $HOME/fesom2/config/namelist.io . # +#cp -n $HOME/fesom2/config/namelist.ice . # +#cp -n $HOME/fesom2/config/namelist.tra . # +#cp -n $HOME/fesom2/config/namelist.dyn . 
# + +date +echo tasks_per_node, total_tasks, HT: $EC_tasks_per_node $EC_total_tasks $EC_hyperthreads +aprun -N $EC_tasks_per_node -n $EC_total_tasks -j $EC_hyperthreads ./fesom.x > "fesom2.out" +date diff --git a/work/job_ini_levante b/work/job_ini_levante new file mode 100755 index 000000000..c84232fab --- /dev/null +++ b/work/job_ini_levante @@ -0,0 +1,38 @@ +#!/bin/bash +#SBATCH --job-name=ref +#SBATCH -p compute +#SBATCH --ntasks-per-node=1 +#SBATCH --ntasks=1 +#SBATCH --time=01:00:00 +#SBATCH -o slurm-out.out +#SBATCH -e slurm-err.out +#SBATCH -A ab0995 + +source /sw/etc/profile.levante +source ../env/levante.dkrz.de/shell + +ulimit -s 102400 + +echo Submitted job: $jobid +squeue -u $USER + +# determine JOBID +JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1` + +ln -s ../bin/fesom_ini.x . # cp -n ../bin/fesom.x +cp -n ../config/namelist.config . +cp -n ../config/namelist.forcing . +cp -n ../config/namelist.oce . +cp -n ../config/namelist.ice . +cp -n ../config/namelist.icepack . + +date +srun -l fesom_ini.x > "fesom_ini.out" +date + +# qstat -f $PBS_JOBID +#export EXITSTATUS=$? +#if [ ${EXITSTATUS} -eq 0 ] || [ ${EXITSTATUS} -eq 127 ] ; then +#sbatch job_mistral +#fi + diff --git a/work/job_levante b/work/job_levante new file mode 100755 index 000000000..3e45afcb0 --- /dev/null +++ b/work/job_levante @@ -0,0 +1,38 @@ +#!/bin/bash +#SBATCH --job-name=ref +#SBATCH -p compute +#SBATCH --ntasks-per-node=128 +#SBATCH --ntasks=512 +#SBATCH --time=08:00:00 +#SBATCH -o slurm-out.out +#SBATCH -e slurm-err.out +#SBATCH -A ab0995 + +source /sw/etc/profile.levante +source ../env/levante.dkrz.de/shell + +ulimit -s 102400 + +echo Submitted job: $jobid +squeue -u $USER + +# determine JOBID +JOBID=`echo $SLURM_JOB_ID |cut -d"." -f1` + +ln -s ../bin/fesom.x . # cp -n ../bin/fesom.x +cp -n ../config/namelist.config . +cp -n ../config/namelist.forcing . +cp -n ../config/namelist.oce . +cp -n ../config/namelist.ice . +cp -n ../config/namelist.icepack . 
+ +date +srun -l fesom.x > "fesom2.0.out" +date + +# qstat -f $PBS_JOBID +#export EXITSTATUS=$? +#if [ ${EXITSTATUS} -eq 0 ] || [ ${EXITSTATUS} -eq 127 ] ; then +#sbatch job_mistral +#fi + diff --git a/work/job_ubuntu b/work/job_ubuntu index 34b9e6535..a5a3466e4 100755 --- a/work/job_ubuntu +++ b/work/job_ubuntu @@ -1,18 +1,20 @@ #!/bin/bash +set -e + ulimit -s unlimited -# determine JOBID +export OMP_NUM_THREADS=1 -ln -s ../bin/fesom.x . # cp -n ../bin/fesom.x +ln -sf ../bin/fesom.x . # cp -n ../bin/fesom.x cp -n ../config/namelist.config . cp -n ../config/namelist.forcing . cp -n ../config/namelist.oce . cp -n ../config/namelist.ice . -cp -n ../config/namelist.io . +cp -n ../config/namelist.io . date -mpirun --allow-run-as-root --mca btl_vader_single_copy_mechanism none -n 2 fesom.x +time mpirun --allow-run-as-root --mca btl_vader_single_copy_mechanism none -n 2 fesom.x date